From d1218d7309475193b53911667911c4f59a1a7752 Mon Sep 17 00:00:00 2001 From: Garrett D'Amore Date: Sat, 21 Nov 2020 22:11:21 -0800 Subject: New NUTS test framework (NNG Unit Test Support). This is based on testutil/acutest, but is cleaner and fixes some short-comings. We will be adding more support for additional common paradigms to better facilitate transport tests. While here we added some more test cases, and fixed a possible symbol collision in the the stats framework (due to Linux use of a macro definition of "si_value" in a standard OS header). Test coverage may regress slightly as we are no longer using some of the legacy APIs. --- src/testing/CMakeLists.txt | 19 + src/testing/acutest.h | 1645 ++++++++++++++++++++++++++++++++++++++++++++ src/testing/certs.c | 126 ++++ src/testing/marry.c | 312 +++++++++ src/testing/nuts.h | 206 ++++++ src/testing/streams.c | 146 ++++ src/testing/util.c | 164 +++++ 7 files changed, 2618 insertions(+) create mode 100644 src/testing/CMakeLists.txt create mode 100644 src/testing/acutest.h create mode 100644 src/testing/certs.c create mode 100644 src/testing/marry.c create mode 100644 src/testing/nuts.h create mode 100644 src/testing/streams.c create mode 100644 src/testing/util.c (limited to 'src/testing') diff --git a/src/testing/CMakeLists.txt b/src/testing/CMakeLists.txt new file mode 100644 index 00000000..221ca8f2 --- /dev/null +++ b/src/testing/CMakeLists.txt @@ -0,0 +1,19 @@ +# +# Copyright 2020 Staysail Systems, Inc. +# +# This software is supplied under the terms of the MIT License, a +# copy of which should be located in the distribution where this +# file was obtained (LICENSE.txt). A copy of the license may also be +# found online at https://opensource.org/licenses/MIT. +# + +# Protocols. +nng_directory(testing`) + +target_sources(nng_testing PRIVATE + certs.c + marry.c + streams.c + util.c) + +target_include_directories(nng_testing PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) diff --git a/src/testing/acutest.h b/src/testing/acutest.h new file mode 100644 index 00000000..7f0fc059 --- /dev/null +++ b/src/testing/acutest.h @@ -0,0 +1,1645 @@ +/* + * Acutest -- Another C/C++ Unit Test facility + * + * + * Copyright (c) 2013-2019 Martin Mitas + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef ACUTEST_H__ +#define ACUTEST_H__ + + +/************************ + *** Public interface *** + ************************/ + +/* By default, "acutest.h" provides the main program entry point (function + * main()). 
However, if the test suite is composed of multiple source files + * which include "acutest.h", then this causes a problem of multiple main() + * definitions. To avoid this problem, #define macro TEST_NO_MAIN in all + * compilation units but one. + */ + +/* Macro to specify list of unit tests in the suite. + * The unit test implementation MUST provide list of unit tests it implements + * with this macro: + * + * TEST_LIST = { + * { "test1_name", test1_func_ptr }, + * { "test2_name", test2_func_ptr }, + * ... + * { 0 } + * }; + * + * The list specifies names of each test (must be unique) and pointer to + * a function implementing it. The function does not take any arguments + * and has no return values, i.e. every test function has to be compatible + * with this prototype: + * + * void test_func(void); + */ +#define TEST_LIST const struct test__ test_list__[] + + +/* Macros for testing whether an unit test succeeds or fails. These macros + * can be used arbitrarily in functions implementing the unit tests. + * + * If any condition fails throughout execution of a test, the test fails. + * + * TEST_CHECK takes only one argument (the condition), TEST_CHECK_ allows + * also to specify an error message to print out if the condition fails. + * (It expects printf-like format string and its parameters). The macros + * return non-zero (condition passes) or 0 (condition fails). + * + * That can be useful when more conditions should be checked only if some + * preceding condition passes, as illustrated in this code snippet: + * + * SomeStruct* ptr = allocate_some_struct(); + * if(TEST_CHECK(ptr != NULL)) { + * TEST_CHECK(ptr->member1 < 100); + * TEST_CHECK(ptr->member2 > 200); + * } + */ +#define TEST_CHECK_(cond,...) test_check__((cond), __FILE__, __LINE__, __VA_ARGS__) +#define TEST_CHECK(cond) test_check__((cond), __FILE__, __LINE__, "%s", #cond) + + +/* These macros are the same as TEST_CHECK_ and TEST_CHECK except that if the + * condition fails, the currently executed unit test is immediately aborted. + * + * That is done either by calling abort() if the unit test is executed as a + * child process; or via longjmp() if the unit test is executed within the + * main Acutest process. + * + * As a side effect of such abortion, your unit tests may cause memory leaks, + * unflushed file descriptors, and other fenomena caused by the abortion. + * + * Therefore you should not use these as a general replacement for TEST_CHECK. + * Use it with some caution, especially if your test causes some other side + * effects to the outside world (e.g. communicating with some server, inserting + * into a database etc.). + */ +#define TEST_ASSERT_(cond,...) \ + do { \ + if (!test_check__((cond), __FILE__, __LINE__, __VA_ARGS__)) \ + test_abort__(); \ + } while(0) +#define TEST_ASSERT(cond) \ + do { \ + if (!test_check__((cond), __FILE__, __LINE__, "%s", #cond)) \ + test_abort__(); \ + } while(0) + + +#ifdef __cplusplus +/* Macros to verify that the code (the 1st argument) throws exception of given + * type (the 2nd argument). (Note these macros are only available in C++.) + * + * TEST_EXCEPTION_ is like TEST_EXCEPTION but accepts custom printf-like + * message. + * + * For example: + * + * TEST_EXCEPTION(function_that_throw(), ExpectedExceptionType); + * + * If the function_that_throw() throws ExpectedExceptionType, the check passes. + * If the function throws anything incompatible with ExpectedExceptionType + * (or if it does not thrown an exception at all), the check fails. 
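 *
 * For instance, with the printf-like variant (my_parse() here is only a
 * hypothetical function used to illustrate the call shape):
 *
 *   TEST_EXCEPTION_(my_parse("bogus"), std::invalid_argument,
 *       "my_parse(%s) throws std::invalid_argument", "bogus");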
+ */ +#define TEST_EXCEPTION(code, exctype) \ + do { \ + bool exc_ok__ = false; \ + const char *msg__ = NULL; \ + try { \ + code; \ + msg__ = "No exception thrown."; \ + } catch(exctype const&) { \ + exc_ok__= true; \ + } catch(...) { \ + msg__ = "Unexpected exception thrown."; \ + } \ + test_check__(exc_ok__, __FILE__, __LINE__, #code " throws " #exctype); \ + if(msg__ != NULL) \ + test_message__("%s", msg__); \ + } while(0) +#define TEST_EXCEPTION_(code, exctype, ...) \ + do { \ + bool exc_ok__ = false; \ + const char *msg__ = NULL; \ + try { \ + code; \ + msg__ = "No exception thrown."; \ + } catch(exctype const&) { \ + exc_ok__= true; \ + } catch(...) { \ + msg__ = "Unexpected exception thrown."; \ + } \ + test_check__(exc_ok__, __FILE__, __LINE__, __VA_ARGS__); \ + if(msg__ != NULL) \ + test_message__("%s", msg__); \ + } while(0) +#endif /* #ifdef __cplusplus */ + + +/* Sometimes it is useful to split execution of more complex unit tests to some + * smaller parts and associate those parts with some names. + * + * This is especially handy if the given unit test is implemented as a loop + * over some vector of multiple testing inputs. Using these macros allow to use + * sort of subtitle for each iteration of the loop (e.g. outputting the input + * itself or a name associated to it), so that if any TEST_CHECK condition + * fails in the loop, it can be easily seen which iteration triggers the + * failure, without the need to manually output the iteration-specific data in + * every single TEST_CHECK inside the loop body. + * + * TEST_CASE allows to specify only single string as the name of the case, + * TEST_CASE_ provides all the power of printf-like string formatting. + * + * Note that the test cases cannot be nested. Starting a new test case ends + * implicitly the previous one. To end the test case explicitly (e.g. to end + * the last test case after exiting the loop), you may use TEST_CASE(NULL). + */ +#define TEST_CASE_(...) test_case__(__VA_ARGS__) +#define TEST_CASE(name) test_case__("%s", name) + + +/* printf-like macro for outputting an extra information about a failure. + * + * Intended use is to output some computed output versus the expected value, + * e.g. like this: + * + * if(!TEST_CHECK(produced == expected)) { + * TEST_MSG("Expected: %d", expected); + * TEST_MSG("Produced: %d", produced); + * } + * + * Note the message is only written down if the most recent use of any checking + * macro (like e.g. TEST_CHECK or TEST_EXCEPTION) in the current test failed. + * This means the above is equivalent to just this: + * + * TEST_CHECK(produced == expected); + * TEST_MSG("Expected: %d", expected); + * TEST_MSG("Produced: %d", produced); + * + * The macro can deal with multi-line output fairly well. It also automatically + * adds a final new-line if there is none present. + */ +#define TEST_MSG(...) test_message__(__VA_ARGS__) + + +/* Maximal output per TEST_MSG call. Longer messages are cut. + * You may define another limit prior including "acutest.h" + */ +#ifndef TEST_MSG_MAXSIZE + #define TEST_MSG_MAXSIZE 1024 +#endif + + +/* Macro for dumping a block of memory. 
+ * + * Its intended use is very similar to what TEST_MSG is for, but instead of + * generating any printf-like message, this is for dumping raw block of a + * memory in a hexadecimal form: + * + * TEST_CHECK(size_produced == size_expected && memcmp(addr_produced, addr_expected, size_produced) == 0); + * TEST_DUMP("Expected:", addr_expected, size_expected); + * TEST_DUMP("Produced:", addr_produced, size_produced); + */ +#define TEST_DUMP(title, addr, size) test_dump__(title, addr, size) + +/* Maximal output per TEST_DUMP call (in bytes to dump). Longer blocks are cut. + * You may define another limit prior including "acutest.h" + */ +#ifndef TEST_DUMP_MAXSIZE + #define TEST_DUMP_MAXSIZE 1024 +#endif + + +/********************** + *** Implementation *** + **********************/ + +/* The unit test files should not rely on anything below. */ + +#include +#include +#include +#include +#include +#include + +#if defined(unix) || defined(__unix__) || defined(__unix) || defined(__APPLE__) + #define ACUTEST_UNIX__ 1 + #include + #include + #include + #include + #include + #include + #include + + #if defined CLOCK_PROCESS_CPUTIME_ID && defined CLOCK_MONOTONIC + #define ACUTEST_HAS_POSIX_TIMER__ 1 + #endif +#endif + +#if defined(__gnu_linux__) + #define ACUTEST_LINUX__ 1 + #include + #include +#endif + +#if defined(_WIN32) || defined(__WIN32__) || defined(__WINDOWS__) + #define ACUTEST_WIN__ 1 + #include + #include +#endif + +#ifdef __cplusplus + #include +#endif + + +/* Note our global private identifiers end with '__' to mitigate risk of clash + * with the unit tests implementation. */ + + +#ifdef __cplusplus + extern "C" { +#endif + + +struct test__ { + const char* name; + void (*func)(void); +}; + +struct test_detail__ { + unsigned char flags; + double duration; +}; + +enum { + TEST_FLAG_RUN__ = 1 << 0, + TEST_FLAG_SUCCESS__ = 1 << 1, + TEST_FLAG_FAILURE__ = 1 << 2, +}; + +extern const struct test__ test_list__[]; + +int test_check__(int cond, const char* file, int line, const char* fmt, ...); +void test_case__(const char* fmt, ...); +void test_message__(const char* fmt, ...); +void test_dump__(const char* title, const void* addr, size_t size); +void test_abort__(void); + + +#ifndef TEST_NO_MAIN + +static char* test_argv0__ = NULL; +static size_t test_list_size__ = 0; +static struct test_detail__ *test_details__ = NULL; +static size_t test_count__ = 0; +static int test_no_exec__ = -1; +static int test_no_summary__ = 0; +static int test_tap__ = 0; +static int test_skip_mode__ = 0; +static int test_worker__ = 0; +static int test_worker_index__ = 0; +static int test_cond_failed__ = 0; +static FILE *test_xml_output__ = NULL; + +static int test_stat_failed_units__ = 0; +static int test_stat_run_units__ = 0; + +static const struct test__* test_current_unit__ = NULL; +static int test_current_index__ = 0; +static char test_case_name__[64] = ""; +static int test_current_already_logged__ = 0; +static int test_case_current_already_logged__ = 0; +static int test_verbose_level__ = 2; +static int test_current_failures__ = 0; +static int test_colorize__ = 0; +static int test_timer__ = 0; + +static int test_abort_has_jmp_buf__ = 0; +static jmp_buf test_abort_jmp_buf__; + +#if defined ACUTEST_WIN__ + typedef LARGE_INTEGER test_timer_type__; + static LARGE_INTEGER test_timer_freq__; + static test_timer_type__ test_timer_start__; + static test_timer_type__ test_timer_end__; + + static void + test_timer_init__(void) + { + QueryPerformanceFrequency(&test_timer_freq__); + } + + static void + 
test_timer_get_time__(LARGE_INTEGER* ts) + { + QueryPerformanceCounter(ts); + } + + static double + test_timer_diff__(LARGE_INTEGER start, LARGE_INTEGER end) + { + double duration = (double)(end.QuadPart - start.QuadPart); + duration /= (double)test_timer_freq__.QuadPart; + return duration; + } + + static void + test_timer_print_diff__(void) + { + printf("%.6lf secs", test_timer_diff__(test_timer_start__, test_timer_end__)); + } +#elif defined ACUTEST_HAS_POSIX_TIMER__ + static clockid_t test_timer_id__; + typedef struct timespec test_timer_type__; + static test_timer_type__ test_timer_start__; + static test_timer_type__ test_timer_end__; + + static void + test_timer_init__(void) + { + if(test_timer__ == 1) + test_timer_id__ = CLOCK_MONOTONIC; + else if(test_timer__ == 2) + test_timer_id__ = CLOCK_PROCESS_CPUTIME_ID; + } + + static void + test_timer_get_time__(struct timespec* ts) + { + clock_gettime(test_timer_id__, ts); + } + + static double + test_timer_diff__(struct timespec start, struct timespec end) + { + double endns; + double startns; + + endns = end.tv_sec; + endns *= 1e9; + endns += end.tv_nsec; + + startns = start.tv_sec; + startns *= 1e9; + startns += start.tv_nsec; + + return ((endns - startns)/ 1e9); + } + + static void + test_timer_print_diff__(void) + { + printf("%.6lf secs", + test_timer_diff__(test_timer_start__, test_timer_end__)); + } +#else + typedef int test_timer_type__; + static test_timer_type__ test_timer_start__; + static test_timer_type__ test_timer_end__; + + void + test_timer_init__(void) + {} + + static void + test_timer_get_time__(int* ts) + { + (void) ts; + } + + static double + test_timer_diff__(int start, int end) + { + (void) start; + (void) end; + return 0.0; + } + + static void + test_timer_print_diff__(void) + {} +#endif + +#define TEST_COLOR_DEFAULT__ 0 +#define TEST_COLOR_GREEN__ 1 +#define TEST_COLOR_RED__ 2 +#define TEST_COLOR_DEFAULT_INTENSIVE__ 3 +#define TEST_COLOR_GREEN_INTENSIVE__ 4 +#define TEST_COLOR_RED_INTENSIVE__ 5 + +static int +test_print_in_color__(int color, const char* fmt, ...) 
+{ + va_list args; + char buffer[256]; + int n; + + va_start(args, fmt); + vsnprintf(buffer, sizeof(buffer), fmt, args); + va_end(args); + buffer[sizeof(buffer)-1] = '\0'; + + if(!test_colorize__) { + return printf("%s", buffer); + } + +#if defined ACUTEST_UNIX__ + { + const char* col_str; + switch(color) { + case TEST_COLOR_GREEN__: col_str = "\033[0;32m"; break; + case TEST_COLOR_RED__: col_str = "\033[0;31m"; break; + case TEST_COLOR_GREEN_INTENSIVE__: col_str = "\033[1;32m"; break; + case TEST_COLOR_RED_INTENSIVE__: col_str = "\033[1;31m"; break; + case TEST_COLOR_DEFAULT_INTENSIVE__: col_str = "\033[1m"; break; + default: col_str = "\033[0m"; break; + } + printf("%s", col_str); + n = printf("%s", buffer); + printf("\033[0m"); + return n; + } +#elif defined ACUTEST_WIN__ + { + HANDLE h; + CONSOLE_SCREEN_BUFFER_INFO info; + WORD attr; + + h = GetStdHandle(STD_OUTPUT_HANDLE); + GetConsoleScreenBufferInfo(h, &info); + + switch(color) { + case TEST_COLOR_GREEN__: attr = FOREGROUND_GREEN; break; + case TEST_COLOR_RED__: attr = FOREGROUND_RED; break; + case TEST_COLOR_GREEN_INTENSIVE__: attr = FOREGROUND_GREEN | FOREGROUND_INTENSITY; break; + case TEST_COLOR_RED_INTENSIVE__: attr = FOREGROUND_RED | FOREGROUND_INTENSITY; break; + case TEST_COLOR_DEFAULT_INTENSIVE__: attr = FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_INTENSITY; break; + default: attr = 0; break; + } + if(attr != 0) + SetConsoleTextAttribute(h, attr); + n = printf("%s", buffer); + SetConsoleTextAttribute(h, info.wAttributes); + return n; + } +#else + n = printf("%s", buffer); + return n; +#endif +} + +static void +test_begin_test_line__(const struct test__* test) +{ + if(!test_tap__) { + if(test_verbose_level__ >= 3) { + test_print_in_color__(TEST_COLOR_DEFAULT_INTENSIVE__, "Test %s:\n", test->name); + test_current_already_logged__++; + } else if(test_verbose_level__ >= 1) { + int n; + char spaces[48]; + + n = test_print_in_color__(TEST_COLOR_DEFAULT_INTENSIVE__, "Test %s... ", test->name); + memset(spaces, ' ', sizeof(spaces)); + if(n < (int) sizeof(spaces)) + printf("%.*s", (int) sizeof(spaces) - n, spaces); + } else { + test_current_already_logged__ = 1; + } + } +} + +static void +test_finish_test_line__(int result) +{ + if(test_tap__) { + const char* str = (result == 0) ? "ok" : "not ok"; + + printf("%s %u - %s\n", str, test_current_index__ + 1, test_current_unit__->name); + + if(result == 0 && test_timer__) { + printf("# Duration: "); + test_timer_print_diff__(); + printf("\n"); + } + } else { + int color = (result == 0) ? TEST_COLOR_GREEN_INTENSIVE__ : TEST_COLOR_RED_INTENSIVE__; + const char* str = (result == 0) ? "OK" : "FAILED"; + printf("[ "); + test_print_in_color__(color, str); + printf(" ]"); + + if(result == 0 && test_timer__) { + printf(" "); + test_timer_print_diff__(); + } + + printf("\n"); + } +} + +static void +test_line_indent__(int level) +{ + static const char spaces[] = " "; + int n = level * 2; + + if(test_tap__ && n > 0) { + n--; + printf("#"); + } + + while(n > 16) { + printf("%s", spaces); + n -= 16; + } + printf("%.*s", n, spaces); +} + +int +test_check__(int cond, const char* file, int line, const char* fmt, ...) 
+{ + const char *result_str; + int result_color; + int verbose_level; + + if(cond) { + result_str = "ok"; + result_color = TEST_COLOR_GREEN__; + verbose_level = 3; + } else { + if(!test_current_already_logged__ && test_current_unit__ != NULL) + test_finish_test_line__(-1); + + result_str = "failed"; + result_color = TEST_COLOR_RED__; + verbose_level = 2; + test_current_failures__++; + test_current_already_logged__++; + } + + if(test_verbose_level__ >= verbose_level) { + va_list args; + + if(!test_case_current_already_logged__ && test_case_name__[0]) { + test_line_indent__(1); + test_print_in_color__(TEST_COLOR_DEFAULT_INTENSIVE__, "Case %s:\n", test_case_name__); + test_current_already_logged__++; + test_case_current_already_logged__++; + } + + test_line_indent__(test_case_name__[0] ? 2 : 1); + if(file != NULL) { + if(test_verbose_level__ < 3) { +#ifdef ACUTEST_WIN__ + const char* lastsep1 = strrchr(file, '\\'); + const char* lastsep2 = strrchr(file, '/'); + if(lastsep1 == NULL) + lastsep1 = file-1; + if(lastsep2 == NULL) + lastsep2 = file-1; + file = (lastsep1 > lastsep2 ? lastsep1 : lastsep2) + 1; +#else + const char* lastsep = strrchr(file, '/'); + if(lastsep != NULL) + file = lastsep+1; +#endif + } + printf("%s:%d: Check ", file, line); + } + + va_start(args, fmt); + vprintf(fmt, args); + va_end(args); + + printf("... "); + test_print_in_color__(result_color, result_str); + printf("\n"); + test_current_already_logged__++; + } + + test_cond_failed__ = (cond == 0); + return !test_cond_failed__; +} + +void +test_case__(const char* fmt, ...) +{ + va_list args; + + if(test_verbose_level__ < 2) + return; + + if(test_case_name__[0]) { + test_case_current_already_logged__ = 0; + test_case_name__[0] = '\0'; + } + + if(fmt == NULL) + return; + + va_start(args, fmt); + vsnprintf(test_case_name__, sizeof(test_case_name__) - 1, fmt, args); + va_end(args); + test_case_name__[sizeof(test_case_name__) - 1] = '\0'; + + if(test_verbose_level__ >= 3) { + test_line_indent__(1); + test_print_in_color__(TEST_COLOR_DEFAULT_INTENSIVE__, "Case %s:\n", test_case_name__); + test_current_already_logged__++; + test_case_current_already_logged__++; + } +} + +void +test_message__(const char* fmt, ...) +{ + char buffer[TEST_MSG_MAXSIZE]; + char* line_beg; + char* line_end; + va_list args; + + if(test_verbose_level__ < 2) + return; + + /* We allow extra message only when something is already wrong in the + * current test. */ + if(test_current_unit__ == NULL || !test_cond_failed__) + return; + + va_start(args, fmt); + vsnprintf(buffer, TEST_MSG_MAXSIZE, fmt, args); + va_end(args); + buffer[TEST_MSG_MAXSIZE-1] = '\0'; + + line_beg = buffer; + while(1) { + line_end = strchr(line_beg, '\n'); + if(line_end == NULL) + break; + test_line_indent__(test_case_name__[0] ? 3 : 2); + printf("%.*s\n", (int)(line_end - line_beg), line_beg); + line_beg = line_end + 1; + } + if(line_beg[0] != '\0') { + test_line_indent__(test_case_name__[0] ? 3 : 2); + printf("%s\n", line_beg); + } +} + +void +test_dump__(const char* title, const void* addr, size_t size) +{ + static const size_t BYTES_PER_LINE = 16; + size_t line_beg; + size_t truncate = 0; + + if(test_verbose_level__ < 2) + return; + + /* We allow extra message only when something is already wrong in the + * current test. */ + if(test_current_unit__ == NULL || !test_cond_failed__) + return; + + if(size > TEST_DUMP_MAXSIZE) { + truncate = size - TEST_DUMP_MAXSIZE; + size = TEST_DUMP_MAXSIZE; + } + + test_line_indent__(test_case_name__[0] ? 
3 : 2); + printf((title[strlen(title)-1] == ':') ? "%s\n" : "%s:\n", title); + + for(line_beg = 0; line_beg < size; line_beg += BYTES_PER_LINE) { + size_t line_end = line_beg + BYTES_PER_LINE; + size_t off; + + test_line_indent__(test_case_name__[0] ? 4 : 3); + printf("%08lx: ", (unsigned long)line_beg); + for(off = line_beg; off < line_end; off++) { + if(off < size) + printf(" %02x", ((unsigned char*)addr)[off]); + else + printf(" "); + } + + printf(" "); + for(off = line_beg; off < line_end; off++) { + unsigned char byte = ((unsigned char*)addr)[off]; + if(off < size) + printf("%c", (iscntrl(byte) ? '.' : byte)); + else + break; + } + + printf("\n"); + } + + if(truncate > 0) { + test_line_indent__(test_case_name__[0] ? 4 : 3); + printf(" ... (and more %u bytes)\n", (unsigned) truncate); + } +} + +void +test_abort__(void) +{ + if(test_abort_has_jmp_buf__) + longjmp(test_abort_jmp_buf__, 1); + else + abort(); +} + +static void +test_list_names__(void) +{ + const struct test__* test; + + printf("Unit tests:\n"); + for(test = &test_list__[0]; test->func != NULL; test++) + printf(" %s\n", test->name); +} + +static void +test_remember__(int i) +{ + if(test_details__[i].flags & TEST_FLAG_RUN__) + return; + + test_details__[i].flags |= TEST_FLAG_RUN__; + test_count__++; +} + +static void +test_set_success__(int i, int success) +{ + test_details__[i].flags |= success ? TEST_FLAG_SUCCESS__ : TEST_FLAG_FAILURE__; +} + +static void +test_set_duration__(int i, double duration) +{ + test_details__[i].duration = duration; +} + +static int +test_name_contains_word__(const char* name, const char* pattern) +{ + static const char word_delim[] = " \t-_."; + const char* substr; + size_t pattern_len; + int starts_on_word_boundary; + int ends_on_word_boundary; + + pattern_len = strlen(pattern); + + substr = strstr(name, pattern); + while(substr != NULL) { + starts_on_word_boundary = (substr == name || strchr(word_delim, substr[-1]) != NULL); + ends_on_word_boundary = (substr[pattern_len] == '\0' || strchr(word_delim, substr[pattern_len]) != NULL); + + if(starts_on_word_boundary && ends_on_word_boundary) + return 1; + + substr = strstr(substr+1, pattern); + } + + return 0; +} + +static int +test_lookup__(const char* pattern) +{ + int i; + int n = 0; + + /* Try exact match. */ + for(i = 0; i < (int) test_list_size__; i++) { + if(strcmp(test_list__[i].name, pattern) == 0) { + test_remember__(i); + n++; + break; + } + } + if(n > 0) + return n; + + /* Try word match. */ + for(i = 0; i < (int) test_list_size__; i++) { + if(test_name_contains_word__(test_list__[i].name, pattern)) { + test_remember__(i); + n++; + } + } + if(n > 0) + return n; + + /* Try relaxed match. */ + for(i = 0; i < (int) test_list_size__; i++) { + if(strstr(test_list__[i].name, pattern) != NULL) { + test_remember__(i); + n++; + } + } + + return n; +} + + +/* Called if anything goes bad in Acutest, or if the unit test ends in other + * way then by normal returning from its function (e.g. exception or some + * abnormal child process termination). */ +static void +test_error__(const char* fmt, ...) 
+{ + va_list args; + + if(test_verbose_level__ == 0) + return; + + if(test_verbose_level__ <= 2 && !test_current_already_logged__ && test_current_unit__ != NULL) { + if(test_tap__) { + test_finish_test_line__(-1); + } else { + printf("[ "); + test_print_in_color__(TEST_COLOR_RED_INTENSIVE__, "FAILED"); + printf(" ]\n"); + } + } + + if(test_verbose_level__ >= 2) { + test_line_indent__(1); + if(test_verbose_level__ >= 3) + test_print_in_color__(TEST_COLOR_RED_INTENSIVE__, "ERROR: "); + va_start(args, fmt); + vprintf(fmt, args); + va_end(args); + printf("\n"); + } + + if(test_verbose_level__ >= 3) { + printf("\n"); + } +} + +/* Call directly the given test unit function. */ +static int +test_do_run__(const struct test__* test, int index) +{ + test_current_unit__ = test; + test_current_index__ = index; + test_current_failures__ = 0; + test_current_already_logged__ = 0; + test_cond_failed__ = 0; + + test_begin_test_line__(test); + +#ifdef __cplusplus + try { +#endif + + /* This is good to do for case the test unit e.g. crashes. */ + fflush(stdout); + fflush(stderr); + + if(!test_worker__) { + test_abort_has_jmp_buf__ = 1; + if(setjmp(test_abort_jmp_buf__) != 0) + goto aborted; + } + + test_timer_get_time__(&test_timer_start__); + test->func(); +aborted: + test_abort_has_jmp_buf__ = 0; + test_timer_get_time__(&test_timer_end__); + + if(test_verbose_level__ >= 3) { + test_line_indent__(1); + if(test_current_failures__ == 0) { + test_print_in_color__(TEST_COLOR_GREEN_INTENSIVE__, "SUCCESS: "); + printf("All conditions have passed.\n"); + + if(test_timer__) { + test_line_indent__(1); + printf("Duration: "); + test_timer_print_diff__(); + printf("\n"); + } + } else { + test_print_in_color__(TEST_COLOR_RED_INTENSIVE__, "FAILED: "); + printf("%d condition%s %s failed.\n", + test_current_failures__, + (test_current_failures__ == 1) ? "" : "s", + (test_current_failures__ == 1) ? "has" : "have"); + } + printf("\n"); + } else if(test_verbose_level__ >= 1 && test_current_failures__ == 0) { + test_finish_test_line__(0); + } + + test_case__(NULL); + test_current_unit__ = NULL; + return (test_current_failures__ == 0) ? 0 : -1; + +#ifdef __cplusplus + } catch(std::exception& e) { + const char* what = e.what(); + test_check__(0, NULL, 0, "Threw std::exception"); + if(what != NULL) + test_message__("std::exception::what(): %s", what); + return -1; + } catch(...) { + test_check__(0, NULL, 0, "Threw an exception"); + return -1; + } +#endif +} + +/* Trigger the unit test. If possible (and not suppressed) it starts a child + * process who calls test_do_run__(), otherwise it calls test_do_run__() + * directly. */ +static void +test_run__(const struct test__* test, int index, int master_index) +{ + int failed = 1; + test_timer_type__ start, end; + + test_current_unit__ = test; + test_current_already_logged__ = 0; + test_timer_get_time__(&start); + + if(!test_no_exec__) { + +#if defined(ACUTEST_UNIX__) + + pid_t pid; + int exit_code; + + /* Make sure the child starts with empty I/O buffers. */ + fflush(stdout); + fflush(stderr); + + pid = fork(); + if(pid == (pid_t)-1) { + test_error__("Cannot fork. %s [%d]", strerror(errno), errno); + failed = 1; + } else if(pid == 0) { + /* Child: Do the test. */ + test_worker__ = 1; + failed = (test_do_run__(test, index) != 0); + exit(failed ? 1 : 0); + } else { + /* Parent: Wait until child terminates and analyze its exit code. */ + waitpid(pid, &exit_code, 0); + if(WIFEXITED(exit_code)) { + switch(WEXITSTATUS(exit_code)) { + case 0: failed = 0; break; /* test has passed. 
*/ + case 1: /* noop */ break; /* "normal" failure. */ + default: test_error__("Unexpected exit code [%d]", WEXITSTATUS(exit_code)); + } + } else if(WIFSIGNALED(exit_code)) { + char tmp[32]; + const char* signame; + switch(WTERMSIG(exit_code)) { + case SIGINT: signame = "SIGINT"; break; + case SIGHUP: signame = "SIGHUP"; break; + case SIGQUIT: signame = "SIGQUIT"; break; + case SIGABRT: signame = "SIGABRT"; break; + case SIGKILL: signame = "SIGKILL"; break; + case SIGSEGV: signame = "SIGSEGV"; break; + case SIGILL: signame = "SIGILL"; break; + case SIGTERM: signame = "SIGTERM"; break; + default: sprintf(tmp, "signal %d", WTERMSIG(exit_code)); signame = tmp; break; + } + test_error__("Test interrupted by %s", signame); + } else { + test_error__("Test ended in an unexpected way [%d]", exit_code); + } + } + +#elif defined(ACUTEST_WIN__) + + char buffer[512] = {0}; + STARTUPINFOA startupInfo; + PROCESS_INFORMATION processInfo; + DWORD exitCode; + + /* Windows has no fork(). So we propagate all info into the child + * through a command line arguments. */ + _snprintf(buffer, sizeof(buffer)-1, + "%s --worker=%d %s --no-exec --no-summary %s --verbose=%d --color=%s -- \"%s\"", + test_argv0__, index, test_timer__ ? "--timer" : "", + test_tap__ ? "--tap" : "", test_verbose_level__, + test_colorize__ ? "always" : "never", + test->name); + memset(&startupInfo, 0, sizeof(startupInfo)); + startupInfo.cb = sizeof(STARTUPINFO); + if(CreateProcessA(NULL, buffer, NULL, NULL, FALSE, 0, NULL, NULL, &startupInfo, &processInfo)) { + WaitForSingleObject(processInfo.hProcess, INFINITE); + GetExitCodeProcess(processInfo.hProcess, &exitCode); + CloseHandle(processInfo.hThread); + CloseHandle(processInfo.hProcess); + failed = (exitCode != 0); + } else { + test_error__("Cannot create unit test subprocess [%ld].", GetLastError()); + failed = 1; + } + +#else + + /* A platform where we don't know how to run child process. */ + failed = (test_do_run__(test, index) != 0); + +#endif + + } else { + /* Child processes suppressed through --no-exec. */ + failed = (test_do_run__(test, index) != 0); + } + test_timer_get_time__(&end); + + test_current_unit__ = NULL; + + test_stat_run_units__++; + if(failed) + test_stat_failed_units__++; + + test_set_success__(master_index, !failed); + test_set_duration__(master_index, test_timer_diff__(start, end)); +} + +#if defined(ACUTEST_WIN__) +/* Callback for SEH events. 
*/ +static LONG CALLBACK +test_seh_exception_filter__(EXCEPTION_POINTERS *ptrs) +{ + test_check__(0, NULL, 0, "Unhandled SEH exception"); + test_message__("Exception code: 0x%08lx", ptrs->ExceptionRecord->ExceptionCode); + test_message__("Exception address: 0x%p", ptrs->ExceptionRecord->ExceptionAddress); + + fflush(stdout); + fflush(stderr); + + return EXCEPTION_EXECUTE_HANDLER; +} +#endif + + +#define TEST_CMDLINE_OPTFLAG_OPTIONALARG__ 0x0001 +#define TEST_CMDLINE_OPTFLAG_REQUIREDARG__ 0x0002 + +#define TEST_CMDLINE_OPTID_NONE__ 0 +#define TEST_CMDLINE_OPTID_UNKNOWN__ (-0x7fffffff + 0) +#define TEST_CMDLINE_OPTID_MISSINGARG__ (-0x7fffffff + 1) +#define TEST_CMDLINE_OPTID_BOGUSARG__ (-0x7fffffff + 2) + +typedef struct TEST_CMDLINE_OPTION__ { + char shortname; + const char* longname; + int id; + unsigned flags; +} TEST_CMDLINE_OPTION__; + +static int +test_cmdline_handle_short_opt_group__(const TEST_CMDLINE_OPTION__* options, + const char* arggroup, + int (*callback)(int /*optval*/, const char* /*arg*/)) +{ + const TEST_CMDLINE_OPTION__* opt; + int i; + int ret = 0; + + for(i = 0; arggroup[i] != '\0'; i++) { + for(opt = options; opt->id != 0; opt++) { + if(arggroup[i] == opt->shortname) + break; + } + + if(opt->id != 0 && !(opt->flags & TEST_CMDLINE_OPTFLAG_REQUIREDARG__)) { + ret = callback(opt->id, NULL); + } else { + /* Unknown option. */ + char badoptname[3]; + badoptname[0] = '-'; + badoptname[1] = arggroup[i]; + badoptname[2] = '\0'; + ret = callback((opt->id != 0 ? TEST_CMDLINE_OPTID_MISSINGARG__ : TEST_CMDLINE_OPTID_UNKNOWN__), + badoptname); + } + + if(ret != 0) + break; + } + + return ret; +} + +#define TEST_CMDLINE_AUXBUF_SIZE__ 32 + +static int +test_cmdline_read__(const TEST_CMDLINE_OPTION__* options, int argc, char** argv, + int (*callback)(int /*optval*/, const char* /*arg*/)) +{ + + const TEST_CMDLINE_OPTION__* opt; + char auxbuf[TEST_CMDLINE_AUXBUF_SIZE__+1]; + int after_doubledash = 0; + int i = 1; + int ret = 0; + + auxbuf[TEST_CMDLINE_AUXBUF_SIZE__] = '\0'; + + while(i < argc) { + if(after_doubledash || strcmp(argv[i], "-") == 0) { + /* Non-option argument. */ + ret = callback(TEST_CMDLINE_OPTID_NONE__, argv[i]); + } else if(strcmp(argv[i], "--") == 0) { + /* End of options. All the remaining members are non-option arguments. */ + after_doubledash = 1; + } else if(argv[i][0] != '-') { + /* Non-option argument. */ + ret = callback(TEST_CMDLINE_OPTID_NONE__, argv[i]); + } else { + for(opt = options; opt->id != 0; opt++) { + if(opt->longname != NULL && strncmp(argv[i], "--", 2) == 0) { + size_t len = strlen(opt->longname); + if(strncmp(argv[i]+2, opt->longname, len) == 0) { + /* Regular long option. */ + if(argv[i][2+len] == '\0') { + /* with no argument provided. */ + if(!(opt->flags & TEST_CMDLINE_OPTFLAG_REQUIREDARG__)) + ret = callback(opt->id, NULL); + else + ret = callback(TEST_CMDLINE_OPTID_MISSINGARG__, argv[i]); + break; + } else if(argv[i][2+len] == '=') { + /* with an argument provided. */ + if(opt->flags & (TEST_CMDLINE_OPTFLAG_OPTIONALARG__ | TEST_CMDLINE_OPTFLAG_REQUIREDARG__)) { + ret = callback(opt->id, argv[i]+2+len+1); + } else { + sprintf(auxbuf, "--%s", opt->longname); + ret = callback(TEST_CMDLINE_OPTID_BOGUSARG__, auxbuf); + } + break; + } else { + continue; + } + } + } else if(opt->shortname != '\0' && argv[i][0] == '-') { + if(argv[i][1] == opt->shortname) { + /* Regular short option. 
*/ + if(opt->flags & TEST_CMDLINE_OPTFLAG_REQUIREDARG__) { + if(argv[i][2] != '\0') + ret = callback(opt->id, argv[i]+2); + else if(i+1 < argc) + ret = callback(opt->id, argv[++i]); + else + ret = callback(TEST_CMDLINE_OPTID_MISSINGARG__, argv[i]); + break; + } else { + ret = callback(opt->id, NULL); + + /* There might be more (argument-less) short options + * grouped together. */ + if(ret == 0 && argv[i][2] != '\0') + ret = test_cmdline_handle_short_opt_group__(options, argv[i]+2, callback); + break; + } + } + } + } + + if(opt->id == 0) { /* still not handled? */ + if(argv[i][0] != '-') { + /* Non-option argument. */ + ret = callback(TEST_CMDLINE_OPTID_NONE__, argv[i]); + } else { + /* Unknown option. */ + char* badoptname = argv[i]; + + if(strncmp(badoptname, "--", 2) == 0) { + /* Strip any argument from the long option. */ + char* assignment = strchr(badoptname, '='); + if(assignment != NULL) { + size_t len = assignment - badoptname; + if(len > TEST_CMDLINE_AUXBUF_SIZE__) + len = TEST_CMDLINE_AUXBUF_SIZE__; + strncpy(auxbuf, badoptname, len); + auxbuf[len] = '\0'; + badoptname = auxbuf; + } + } + + ret = callback(TEST_CMDLINE_OPTID_UNKNOWN__, badoptname); + } + } + } + + if(ret != 0) + return ret; + i++; + } + + return ret; +} + +static void +test_help__(void) +{ + printf("Usage: %s [options] [test...]\n", test_argv0__); + printf("\n"); + printf("Run the specified unit tests; or if the option '--skip' is used, run all\n"); + printf("tests in the suite but those listed. By default, if no tests are specified\n"); + printf("on the command line, all unit tests in the suite are run.\n"); + printf("\n"); + printf("Options:\n"); + printf(" -s, --skip Execute all unit tests but the listed ones\n"); + printf(" --exec[=WHEN] If supported, execute unit tests as child processes\n"); + printf(" (WHEN is one of 'auto', 'always', 'never')\n"); +#if defined ACUTEST_WIN__ + printf(" -t, --timer Measure test duration\n"); +#elif defined ACUTEST_HAS_POSIX_TIMER__ + printf(" -t, --timer Measure test duration (real time)\n"); + printf(" --timer=TIMER Measure test duration, using given timer\n"); + printf(" (TIMER is one of 'real', 'cpu')\n"); +#endif + printf(" -E, --no-exec Same as --exec=never\n"); + printf(" --no-summary Suppress printing of test results summary\n"); + printf(" --tap Produce TAP-compliant output\n"); + printf(" (See https://testanything.org/)\n"); + printf(" -x, --xml-output=FILE Enable XUnit output to the given file\n"); + printf(" -l, --list List unit tests in the suite and exit\n"); + printf(" -v, --verbose Make output more verbose\n"); + printf(" --verbose=LEVEL Set verbose level to LEVEL:\n"); + printf(" 0 ... Be silent\n"); + printf(" 1 ... Output one line per test (and summary)\n"); + printf(" 2 ... As 1 and failed conditions (this is default)\n"); + printf(" 3 ... 
As 1 and all conditions (and extended summary)\n"); + printf(" --color[=WHEN] Enable colorized output\n"); + printf(" (WHEN is one of 'auto', 'always', 'never')\n"); + printf(" --no-color Same as --color=never\n"); + printf(" -h, --help Display this help and exit\n"); + + if(test_list_size__ < 16) { + printf("\n"); + test_list_names__(); + } +} + +static const TEST_CMDLINE_OPTION__ test_cmdline_options__[] = { + { 's', "skip", 's', 0 }, + { 0, "exec", 'e', TEST_CMDLINE_OPTFLAG_OPTIONALARG__ }, + { 'E', "no-exec", 'E', 0 }, +#if defined ACUTEST_WIN__ + { 't', "timer", 't', 0 }, +#elif defined ACUTEST_HAS_POSIX_TIMER__ + { 't', "timer", 't', TEST_CMDLINE_OPTFLAG_OPTIONALARG__ }, +#endif + { 0, "no-summary", 'S', 0 }, + { 0, "tap", 'T', 0 }, + { 'l', "list", 'l', 0 }, + { 'v', "verbose", 'v', TEST_CMDLINE_OPTFLAG_OPTIONALARG__ }, + { 0, "color", 'c', TEST_CMDLINE_OPTFLAG_OPTIONALARG__ }, + { 0, "no-color", 'C', 0 }, + { 'h', "help", 'h', 0 }, + { 0, "worker", 'w', TEST_CMDLINE_OPTFLAG_REQUIREDARG__ }, /* internal */ + { 'x', "xml-output", 'x', TEST_CMDLINE_OPTFLAG_REQUIREDARG__ }, + { 0, NULL, 0, 0 } +}; + +static int +test_cmdline_callback__(int id, const char* arg) +{ + switch(id) { + case 's': + test_skip_mode__ = 1; + break; + + case 'e': + if(arg == NULL || strcmp(arg, "always") == 0) { + test_no_exec__ = 0; + } else if(strcmp(arg, "never") == 0) { + test_no_exec__ = 1; + } else if(strcmp(arg, "auto") == 0) { + /*noop*/ + } else { + fprintf(stderr, "%s: Unrecognized argument '%s' for option --exec.\n", test_argv0__, arg); + fprintf(stderr, "Try '%s --help' for more information.\n", test_argv0__); + exit(2); + } + break; + + case 'E': + test_no_exec__ = 1; + break; + + case 't': +#if defined ACUTEST_WIN__ || defined ACUTEST_HAS_POSIX_TIMER__ + if(arg == NULL || strcmp(arg, "real") == 0) { + test_timer__ = 1; + #ifndef ACUTEST_WIN__ + } else if(strcmp(arg, "cpu") == 0) { + test_timer__ = 2; + #endif + } else { + fprintf(stderr, "%s: Unrecognized argument '%s' for option --timer.\n", test_argv0__, arg); + fprintf(stderr, "Try '%s --help' for more information.\n", test_argv0__); + exit(2); + } +#endif + break; + + case 'S': + test_no_summary__ = 1; + break; + + case 'T': + test_tap__ = 1; + break; + + case 'l': + test_list_names__(); + exit(0); + + case 'v': + test_verbose_level__ = (arg != NULL ? 
atoi(arg) : test_verbose_level__+1); + break; + + case 'c': + if(arg == NULL || strcmp(arg, "always") == 0) { + test_colorize__ = 1; + } else if(strcmp(arg, "never") == 0) { + test_colorize__ = 0; + } else if(strcmp(arg, "auto") == 0) { + /*noop*/ + } else { + fprintf(stderr, "%s: Unrecognized argument '%s' for option --color.\n", test_argv0__, arg); + fprintf(stderr, "Try '%s --help' for more information.\n", test_argv0__); + exit(2); + } + break; + + case 'C': + test_colorize__ = 0; + break; + + case 'h': + test_help__(); + exit(0); + + case 'w': + test_worker__ = 1; + test_worker_index__ = atoi(arg); + break; + case 'x': + test_xml_output__ = fopen(arg, "w"); + if (!test_xml_output__) { + fprintf(stderr, "Unable to open '%s': %s\n", arg, strerror(errno)); + exit(2); + } + break; + + case 0: + if(test_lookup__(arg) == 0) { + fprintf(stderr, "%s: Unrecognized unit test '%s'\n", test_argv0__, arg); + fprintf(stderr, "Try '%s --list' for list of unit tests.\n", test_argv0__); + exit(2); + } + break; + + case TEST_CMDLINE_OPTID_UNKNOWN__: + fprintf(stderr, "Unrecognized command line option '%s'.\n", arg); + fprintf(stderr, "Try '%s --help' for more information.\n", test_argv0__); + exit(2); + + case TEST_CMDLINE_OPTID_MISSINGARG__: + fprintf(stderr, "The command line option '%s' requires an argument.\n", arg); + fprintf(stderr, "Try '%s --help' for more information.\n", test_argv0__); + exit(2); + + case TEST_CMDLINE_OPTID_BOGUSARG__: + fprintf(stderr, "The command line option '%s' does not expect an argument.\n", arg); + fprintf(stderr, "Try '%s --help' for more information.\n", test_argv0__); + exit(2); + } + + return 0; +} + + +#ifdef ACUTEST_LINUX__ +static int +test_is_tracer_present__(void) +{ + char buf[256+32+1]; + int tracer_present = 0; + int fd; + ssize_t n_read; + + fd = open("/proc/self/status", O_RDONLY); + if(fd == -1) + return 0; + + n_read = read(fd, buf, sizeof(buf)-1); + while(n_read > 0) { + static const char pattern[] = "TracerPid:"; + const char* field; + + buf[n_read] = '\0'; + field = strstr(buf, pattern); + if(field != NULL && field < buf + sizeof(buf) - 32) { + pid_t tracer_pid = (pid_t) atoi(field + sizeof(pattern) - 1); + tracer_present = (tracer_pid != 0); + break; + } + + if(n_read == sizeof(buf)-1) { + memmove(buf, buf + sizeof(buf)-1 - 32, 32); + n_read = read(fd, buf+32, sizeof(buf)-1-32); + if(n_read > 0) + n_read += 32; + } + } + + close(fd); + return tracer_present; +} +#endif + +int +main(int argc, char** argv) +{ + int i; + test_argv0__ = argv[0]; + +#if defined ACUTEST_UNIX__ + test_colorize__ = isatty(STDOUT_FILENO); +#elif defined ACUTEST_WIN__ + #if defined __BORLANDC__ + test_colorize__ = isatty(_fileno(stdout)); + #else + test_colorize__ = _isatty(_fileno(stdout)); + #endif +#else + test_colorize__ = 0; +#endif + + test_timer_init__(); + + /* Count all test units */ + test_list_size__ = 0; + for(i = 0; test_list__[i].func != NULL; i++) + test_list_size__++; + + test_details__ = (struct test_detail__*)calloc(test_list_size__, sizeof(struct test_detail__)); + if(test_details__ == NULL) { + fprintf(stderr, "Out of memory.\n"); + exit(2); + } + + /* Parse options */ + test_cmdline_read__(test_cmdline_options__, argc, argv, test_cmdline_callback__); + +#if defined(ACUTEST_WIN__) + SetUnhandledExceptionFilter(test_seh_exception_filter__); +#endif + + /* By default, we want to run all tests. 
*/ + if(test_count__ == 0) { + for(i = 0; test_list__[i].func != NULL; i++) + test_remember__(i); + } + + /* Guess whether we want to run unit tests as child processes. */ + if(test_no_exec__ < 0) { + test_no_exec__ = 0; + + if(test_count__ <= 1) { + test_no_exec__ = 1; + } else { +#ifdef ACUTEST_WIN__ + if(IsDebuggerPresent()) + test_no_exec__ = 1; +#endif +#ifdef ACUTEST_LINUX__ + if(test_is_tracer_present__()) + test_no_exec__ = 1; +#endif + } + } + + if(test_tap__) { + /* TAP requires we know test result ("ok", "not ok") before we output + * anything about the test, and this gets problematic for larger verbose + * levels. */ + if(test_verbose_level__ > 2) + test_verbose_level__ = 2; + + /* TAP harness should provide some summary. */ + test_no_summary__ = 1; + + if(!test_worker__) + printf("1..%d\n", (int) test_count__); + } + + int index = test_worker_index__; + for(i = 0; test_list__[i].func != NULL; i++) { + int run = (test_details__[i].flags & TEST_FLAG_RUN__); + if (test_skip_mode__) /* Run all tests except those listed. */ + run = !run; + if(run) + test_run__(&test_list__[i], index++, i); + } + + /* Write a summary */ + if(!test_no_summary__ && test_verbose_level__ >= 1) { + if(test_verbose_level__ >= 3) { + test_print_in_color__(TEST_COLOR_DEFAULT_INTENSIVE__, "Summary:\n"); + + printf(" Count of all unit tests: %4d\n", (int) test_list_size__); + printf(" Count of run unit tests: %4d\n", test_stat_run_units__); + printf(" Count of failed unit tests: %4d\n", test_stat_failed_units__); + printf(" Count of skipped unit tests: %4d\n", (int) test_list_size__ - test_stat_run_units__); + } + + if(test_stat_failed_units__ == 0) { + test_print_in_color__(TEST_COLOR_GREEN_INTENSIVE__, "SUCCESS:"); + printf(" All unit tests have passed.\n"); + } else { + test_print_in_color__(TEST_COLOR_RED_INTENSIVE__, "FAILED:"); + printf(" %d of %d unit tests %s failed.\n", + test_stat_failed_units__, test_stat_run_units__, + (test_stat_failed_units__ == 1) ? "has" : "have"); + } + + if(test_verbose_level__ >= 3) + printf("\n"); + } + + if (test_xml_output__) { +#if defined ACUTEST_UNIX__ + char *suite_name = basename(argv[0]); +#elif defined ACUTEST_WIN__ + char suite_name[_MAX_FNAME]; + _splitpath(argv[0], NULL, NULL, suite_name, NULL); +#else + const char *suite_name = argv[0]; +#endif + fprintf(test_xml_output__, "\n"); + fprintf(test_xml_output__, "\n", + suite_name, (int)test_list_size__, test_stat_failed_units__, test_stat_failed_units__, + (int)test_list_size__ - test_stat_run_units__); + for(i = 0; test_list__[i].func != NULL; i++) { + struct test_detail__ *details = &test_details__[i]; + fprintf(test_xml_output__, " \n", test_list__[i].name, details->duration); + if (details->flags & TEST_FLAG_FAILURE__) + fprintf(test_xml_output__, " \n"); + if (!(details->flags & TEST_FLAG_FAILURE__) && !(details->flags & TEST_FLAG_SUCCESS__)) + fprintf(test_xml_output__, " \n"); + fprintf(test_xml_output__, " \n"); + } + fprintf(test_xml_output__, "\n"); + fclose(test_xml_output__); + } + + free((void*) test_details__); + + return (test_stat_failed_units__ == 0) ? 0 : 1; +} + + +#endif /* #ifndef TEST_NO_MAIN */ + +#ifdef __cplusplus + } /* extern "C" */ +#endif + + +#endif /* #ifndef ACUTEST_H__ */ diff --git a/src/testing/certs.c b/src/testing/certs.c new file mode 100644 index 00000000..e2ed94a0 --- /dev/null +++ b/src/testing/certs.c @@ -0,0 +1,126 @@ +// +// Copyright 2020 Staysail Systems, Inc. 
+// Copyright 2018 Capitar IT Group BV +// +// This software is supplied under the terms of the MIT License, a +// copy of which should be located in the distribution where this +// file was obtained (LICENSE.txt). A copy of the license may also be +// found online at https://opensource.org/licenses/MIT. +// + +// TLS certificates. These are pre-generated, and should not be used outside +// of these test cases. They are all using RSA 2048 with SHA256. +// All certs are signed by the root key (making the root self-signed). +// They all expire in about 100 years -- so we don't have to worry about +// expiration. +// +// The server cert uses CN 127.0.0.1. +// +// Country = XX +// State = Utopia +// Locality = Paradise +// Organization = NNG Tests, Inc. +// + +const char *nuts_server_key = + "-----BEGIN RSA PRIVATE KEY-----\n" + "MIIEowIBAAKCAQEAyPdnRbMrQj9902TGQsmMbG6xTSl9XKbJr55BcnyZifsrqA7B\n" + "bNSkndVw9Qq+OJQIDBTfRhGdG+o9j3h6SDVvIb62fWtwJ5Fe0eUmeYwPc1PKQzOm\n" + "MFlMYekXiZsx60yu5LeuUhGlb84+csImH+m3NbutInPJcStSq0WfSV6VNk6DN353\n" + "5ex66zV2Ms6ikys1vCC434YqIpe1VxUh+IC2widJcLDCxmmJt3TOlx5f9OcKMkxu\n" + "H4fMAzgjIEpIrUjdb19CGNVvsNrEEB2CShBMgBdqMaAnKFxpKgfzS0JFulxRGNtp\n" + "srweki+j+a4sJXTv40kELkRQS6uB6wWZNjcPywIDAQABAoIBAQCGSUsot+BgFCzv\n" + "5JbWafb7Pbwb421xS8HZJ9Zzue6e1McHNVTqc+zLyqQAGX2iMMhvykKnf32L+anJ\n" + "BKgxOANaeSVYCUKYLfs+JfDfp0druMGexhR2mjT/99FSkfF5WXREQLiq/j+dxiLU\n" + "bActq+5QaWf3bYddp6VF7O/TBvCNqBfD0+S0o0wtBdvxXItrKPTD5iKr9JfLWdAt\n" + "YNAk2QgFywFtY5zc2wt4queghF9GHeBzzZCuVj9QvPA4WdVq0mePaPTmvTYQUD0j\n" + "GT6X5j9JhqCwfh7trb/HfkmLHwwc62zPDFps+Dxao80+vss5b/EYZ4zY3S/K3vpG\n" + "f/e42S2BAoGBAP51HQYFJGC/wsNtOcX8RtXnRo8eYmyboH6MtBFrZxWl6ERigKCN\n" + "5Tjni7EI3nwi3ONg0ENPFkoQ8h0bcVFS7iW5kz5te73WaOFtpkU9rmuFDUz37eLP\n" + "d+JLZ5Kwfn2FM9HoiSAZAHowE0MIlmmIEXSnFtqA2zzorPQLO/4QlR+VAoGBAMov\n" + "R0yaHg3qPlxmCNyLXKiGaGNzvsvWjYw825uCGmVZfhzDhOiCFMaMb51BS5Uw/gwm\n" + "zHxmJjoqak8JjxaQ1qKPoeY1TJ5ps1+TRq9Wzm2/zGqJHOXnRPlqwBQ6AFllAMgt\n" + "Rlp5uqb8QJ+YEo6/1kdGhw9kZWCZEEue6MNQjxnfAoGARLkUkZ+p54di7qz9QX+V\n" + "EghYgibOpk6R1hviNiIvwSUByhZgbvxjwC6pB7NBg31W8wIevU8K0g4plbrnq/Md\n" + "5opsPhwLo4XY5albkq/J/7f7k6ISWYN2+WMsIe4Q+42SJUsMXeLiwh1h1mTnWrEp\n" + "JbxK69CJZbXhoDe4iDGqVNECgYAjlgS3n9ywWE1XmAHxR3osk1OmRYYMfJv3VfLV\n" + "QSYCNqkyyNsIzXR4qdkvVYHHJZNhcibFsnkB/dsuRCFyOFX+0McPLMxqiXIv3U0w\n" + "qVe2C28gRTfX40fJmpdqN/c9xMBJe2aJoClRIM8DCBIkG/HMI8a719DcGrS6iqKv\n" + "VeuKAwKBgEgD+KWW1KtoSjCBlS0NP8HjC/Rq7j99YhKE6b9h2slIa7JTO8RZKCa0\n" + "qbuomdUeJA3R8h+5CFkEKWqO2/0+dUdLNOjG+CaTFHaUJevzHOzIjpn+VsfCLV13\n" + "yupGzHG+tGtdrWgLn9Dzdp67cDfSnsSh+KODPECAAFfo+wPvD8DS\n" + "-----END RSA PRIVATE KEY-----\n"; + +const char *nuts_server_crt = + "-----BEGIN CERTIFICATE-----\n" + "MIIDRzCCAi8CFCOIJGs6plMawgBYdDuCRV7UuJuyMA0GCSqGSIb3DQEBCwUAMF8x\n" + "CzAJBgNVBAYTAlhYMQ8wDQYDVQQIDAZVdG9waWExETAPBgNVBAcMCFBhcmFkaXNl\n" + "MRgwFgYDVQQKDA9OTkcgVGVzdHMsIEluYy4xEjAQBgNVBAMMCWxvY2FsaG9zdDAg\n" + "Fw0yMDA1MjMyMzMxMTlaGA8yMTIwMDQyOTIzMzExOVowXzELMAkGA1UEBhMCWFgx\n" + "DzANBgNVBAgMBlV0b3BpYTERMA8GA1UEBwwIUGFyYWRpc2UxGDAWBgNVBAoMD05O\n" + "RyBUZXN0cywgSW5jLjESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0B\n" + "AQEFAAOCAQ8AMIIBCgKCAQEAyPdnRbMrQj9902TGQsmMbG6xTSl9XKbJr55BcnyZ\n" + "ifsrqA7BbNSkndVw9Qq+OJQIDBTfRhGdG+o9j3h6SDVvIb62fWtwJ5Fe0eUmeYwP\n" + "c1PKQzOmMFlMYekXiZsx60yu5LeuUhGlb84+csImH+m3NbutInPJcStSq0WfSV6V\n" + "Nk6DN3535ex66zV2Ms6ikys1vCC434YqIpe1VxUh+IC2widJcLDCxmmJt3TOlx5f\n" + "9OcKMkxuH4fMAzgjIEpIrUjdb19CGNVvsNrEEB2CShBMgBdqMaAnKFxpKgfzS0JF\n" + "ulxRGNtpsrweki+j+a4sJXTv40kELkRQS6uB6wWZNjcPywIDAQABMA0GCSqGSIb3\n" 
+ "DQEBCwUAA4IBAQA86Fqrd4aiih6R3fwiMLwV6IQJv+u5rQeqA4D0xu6v6siP42SJ\n" + "YMaI2DkNGrWdSFVSHUK/efceCrhnMlW7VM8I1cyl2F/qKMfnT72cxqqquiKtQKdT\n" + "NDTzv61QMUP9n86HxMzGS7jg0Pknu55BsIRNK6ndDvI3D/K/rzZs4xbqWSSfNfQs\n" + "fNFBbOuDrkS6/1h3p8SY1uPM18WLVv3GO2T3aeNMHn7YJAKSn+sfaxzAPyPIK3UT\n" + "W8ecGQSHOqBJJQELyUfMu7lx/FCYKUhN7/1uhU5Qf1pCR8hkIMegtqr64yVBNMOn\n" + "248fuiHbs9BRknuA/PqjxIDDZTwtDrfVSO/S\n" + "-----END CERTIFICATE-----\n"; + +const char *nuts_client_key = + "-----BEGIN RSA PRIVATE KEY-----\n" + "MIIEowIBAAKCAQEArohAOr7gv5aNpTEviOPPBJ2fArUX2EajMEtU9tF8H/TTlcMB\n" + "oy+vYoyNe56jc7CWUfO0S54rg0XaQ7HTI5EWueSR9wrEVK4q+Zg6x1dwr4k5SxD5\n" + "NcStDXzUjiCi9ygZRxpOUz8jRhKZFENuCdLxSN7E2vuOIU9IR5FpatMlsD33rTOX\n" + "Pgyx7qNpBj63ZCzY3b09zWBAXc/sLd1mxjlNP/LbtVLrFeIT1j6Gv0UgzxIcEjQ3\n" + "vybV/EYK7THn7jLhudEa+7fC9jfzwozbuszfEje/U0h0/DF4coGyIQTfDh6Wmk3x\n" + "5YB2QaI/0jwn8cwracKGtNO+vLqV4yUWZxf5xwIDAQABAoIBADXIEJrJpPIEz6Me\n" + "0/oH0QwoEg7AhReFNNY18HmaNfiW9fhJPiDuGcpxa0uzp8SHntqkEWPX2qq6BLcl\n" + "fd2Q4QLpdz08GSHLBb0h9sLko/nDfF8wXMr/zx+/3rPpRK6KsbdiWM54P0NhicBf\n" + "wvHOCcIdu2WLbNHA35IGMgjUBeIXxAsje63RBS3Dd6RnASxF7bbC/GXiUouQnos1\n" + "VSLoR6fLQQYlrMOAJU3ruPvMRwkrgaHQ1jl3PL4ilZMuvt7LSAi/KUDKMLRHdLNe\n" + "tMPITE5CvQ/rBhiUHMsTn1Xb2/jmSuJieJtG2fEDmLFuYZMUFMg1XfQ+ZC9cDCGI\n" + "wiEYUbkCgYEA1NoKnHp7Zmc2AK1J78uYByEwr8z2nAYZNq5LFeKuCd4rky6v385x\n" + "IjIYhvPLiGXw/lDfF1T15lHHo2FDAQqDa2rbEe+ycDC7Qa3eJjcsN284n1RM2pl+\n" + "iNyyhS09YVadelBxWsMqnwdDlf5lrSa7DW1+/u/z2iAw8lGka8XpFpsCgYEA0emd\n" + "sYqNivonQFEqJxi2kGTBjX8HNEjeF9tTLuAAg0hjhbW4i1v3JsekW9thbG436THa\n" + "4zWUBmcaEwx0iTD1dqM+d+PbN/4vxoRx9kWQJicfR+sa6eJiwL5UmiqDdX4to5z9\n" + "MbahemNBzYybr7lcvw+RbL91Fr/z3GooDM9rxkUCgYAuF8mUeTGfy1n2a5BHTV9u\n" + "q9FPQKNmxitPnA7GfoARwvrMtJ+BZ8M4FIEbOFArCWhWqkylUNCvP6ZryvQnlY9A\n" + "A7PM/os1oFfssSoaPHhmyL8KQcciz3qHSMOf81wHaCpSAnmJnhnstjX8lUqPZIO9\n" + "NKj7rBqycaYn02Y3sHP5YQKBgQDQxOQNW5uCiWDYWuDtmWqZGVxW+euUWJRqbbvB\n" + "dw+LgkdZCG7OS1z3uL8CjKHMUaJRzz+/kd3ysEACifStLYAzyg+q9XdlrOyfJ8Kg\n" + "CHdhOq+lu3I9Aubsg19pJLcx95g0jUJUWysmqekcIagFkPlpHHaqDZDKW4aRxRKo\n" + "CvNJcQKBgA9DB8OzHA/gp8TztxUZu8hAVfehLxVORquFvMRF0cr8uxjbu/6sDhzc\n" + "TRUkXRUe4DGxxMzAd+1SF/IWlcuZlfcuZrytH1hbjmrN8H30y+yGXFsSGCI/rudk\n" + "rLXNS+vWEeuOV8lQuQY0fkokmxnmhkPDMXra5/3KrVMzm3ZNF5N8\n" + "-----END RSA PRIVATE KEY-----\n"; + +const char *nuts_client_crt = + "-----BEGIN CERTIFICATE-----\n" + "MIIDdzCCAl8CFEzqJgxMn+OTdw7RjLtz8FlhrQ0HMA0GCSqGSIb3DQEBCwUAMHcx\n" + "CzAJBgNVBAYTAlhYMQ8wDQYDVQQIDAZVdG9waWExETAPBgNVBAcMCFBhcmFkaXNl\n" + "MRgwFgYDVQQKDA9OTkcgVGVzdHMsIEluYy4xFDASBgNVBAsMC0NsaWVudCBDZXJ0\n" + "MRQwEgYDVQQDDAtUZXN0IENsaWVudDAgFw0yMDA1MjMxODQ1MjZaGA8yMTIwMDQy\n" + "OTE4NDUyNlowdzELMAkGA1UEBhMCWFgxDzANBgNVBAgMBlV0b3BpYTERMA8GA1UE\n" + "BwwIUGFyYWRpc2UxGDAWBgNVBAoMD05ORyBUZXN0cywgSW5jLjEUMBIGA1UECwwL\n" + "Q2xpZW50IENlcnQxFDASBgNVBAMMC1Rlc3QgQ2xpZW50MIIBIjANBgkqhkiG9w0B\n" + "AQEFAAOCAQ8AMIIBCgKCAQEAoHWEJXvfaHDM33AyYbJHggKOllgcvwscEnsXztIt\n" + "OK+0jO6SRFSbtye1cjtrkGVCYBjeWMcOdEiNB0pw3PceVpF/Q9ifCuaSYsJA3sPH\n" + "wi/A3G7ZTe2KCH1i26I4zyw1Bn5AzkaDDXsaht2S9PEqIBCbWo/V1pWiv4QdYmLT\n" + "/UFYJDxFpFC3iKVC+BDv9yzziyaFXOYsQJXcaq8ZRD79bNV5NFfzUih8RoasIdD4\n" + "LoamBSbbr5XzstTISus+wu1JDKgKkYMJhLGA/tdU/eOKuTDx89yO4ba23W74xeqW\n" + "JYe0wPy+krmeB5M7UA7jIvg1JXhYACxujhieMp7wcC3FPwIDAQABMA0GCSqGSIb3\n" + "DQEBCwUAA4IBAQCMTQ89YnD19bCGIdUl/z6w2yx1x1kvTYHT+SzhUprsgiuS3KT1\n" + "RZNhjf5U3Yu+B6SrJCLuylv+L2zQfmHogp3lV7bayOA7r/rVy5fdmHS+Ei1w6LDL\n" + "t8jayiRMPG4VCgaG486yI73PFpK5DXnyFqSd23TlWvNoNeVag5gjlhzG+mHZBSB2\n" + 
"ExpGY3SPxrKSzDqIITVPVgzjW25N8qtgLXC6HODDiViNYq1nmuoS4O80NIYAPPs6\n" + "sxUMa5kT+zc17q57ZcgNq/sSGI3BU4b/E/8ntIwiui2xWSf/4JR6xtanih8uY5Pu\n" + "QTgg9qTtFgtu4WWUP7JhreoINTw6O4/g5Z18\n" + "-----END CERTIFICATE-----\n"; diff --git a/src/testing/marry.c b/src/testing/marry.c new file mode 100644 index 00000000..c0328df9 --- /dev/null +++ b/src/testing/marry.c @@ -0,0 +1,312 @@ +// +// Copyright 2020 Staysail Systems, Inc. +// Copyright 2018 Capitar IT Group BV +// +// This software is supplied under the terms of the MIT License, a +// copy of which should be located in the distribution where this +// file was obtained (LICENSE.txt). A copy of the license may also be +// found online at https://opensource.org/licenses/MIT. +// + +#ifdef _WIN32 + +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif + +#include +#include +#include +#else +#include +#include +#include +#include +#include +#include +#endif +#include +#include +#include +#include +#include + +#define TEST_NO_MAIN +#include "nuts.h" + +void +nuts_scratch_addr(const char *scheme, size_t sz, char *addr) +{ + if ((strcmp(scheme, "inproc") == 0) || + (strcmp(scheme, "abstract") == 0)) { + (void) snprintf(addr, sz, "%s://nuts%04x%04x%04x%04x", scheme, + nng_random(), nng_random(), nng_random(), nng_random()); + return; + } + + if ((strncmp(scheme, "tcp", 3) == 0) || + (strncmp(scheme, "tls", 3) == 0)) { + (void) snprintf( + addr, sz, "%s://127.0.0.1:%u", scheme, nuts_next_port()); + return; + } + + if (strncmp(scheme, "ws", 2) == 0) { + (void) snprintf(addr, sz, + "%s://127.0.0.1:%u/nuts%04x%04x%04x%04x", scheme, + nuts_next_port(), nng_random(), nng_random(), nng_random(), + nng_random()); + return; + } + + if ((strncmp(scheme, "ipc", 3) == 0) || + (strncmp(scheme, "unix", 4) == 0)) { +#ifdef _WIN32 + // Windows doesn't place IPC names in the filesystem. + (void) snprintf(addr, sz, "%s://nuts%04x%04x%04x%04x", scheme, + nng_random(), nng_random(), nng_random(), nng_random()); + return; +#else + char *tmpdir; + + if (((tmpdir = getenv("TMPDIR")) == NULL) && + ((tmpdir = getenv("TEMP")) == NULL) && + ((tmpdir = getenv("TMP")) == NULL)) { + tmpdir = "/tmp"; + } + + (void) snprintf(addr, sz, "%s://%s/nuts%04x%04x%04x%04x", + scheme, tmpdir, nng_random(), nng_random(), nng_random(), + nng_random()); + return; +#endif + } + + // We should not be here. + abort(); +} + +// nuts_next_port returns a "next" allocation port. +// Ports are chosen by starting from a random point within a +// range (normally 38000-40000, but other good places to choose +// might be 36000-37000, 42000-43000, 45000-47000, 48000-49000. +// These are non-ephemeral ports. Successive calls to this function +// will return the next port in the range (wrapping). This works even +// across process boundaries, as the range is tracked in a file named +// by $TEST_PORT_FILE. The range of ports can be configured by using +// $TEST_PORT_RANGE (the range is specified as "lo:hi" where the actual +// port will be in the range [lo,hi). 
+uint16_t
+nuts_next_port(void)
+{
+	char *   name;
+	FILE *   f;
+	uint16_t port;
+	uint16_t base;
+	uint16_t end;
+	char *   str;
+#ifdef _WIN32
+	OVERLAPPED olp;
+	HANDLE     h;
+#endif
+
+	if ((name = getenv("NUTS_PORT_FILE")) == NULL) {
+		name = ".nuts_ports";
+	}
+	// NUTS_PORT_RANGE is "lo:hi"; both values must parse and the
+	// range must be non-empty, otherwise use the defaults.
+	if (((str = getenv("NUTS_PORT_RANGE")) == NULL) ||
+	    ((sscanf(str, "%hu:%hu", &base, &end)) != 2) ||
+	    ((int) end - (int) base) < 1) {
+		base = 38000;
+		end  = 40000;
+	}
+
+	if (((f = fopen(name, "r+")) == NULL) &&
+	    ((f = fopen(name, "w+")) == NULL)) {
+		return (0);
+	}
+	(void) fseek(f, 0, SEEK_SET);
+
+#ifdef _WIN32
+	h = (HANDLE) _get_osfhandle(_fileno(f));
+
+	// This contains the offset information for LockFileEx.
+	ZeroMemory(&olp, sizeof(olp));
+
+	if (LockFileEx(h, LOCKFILE_EXCLUSIVE_LOCK, 0, MAXDWORD, MAXDWORD,
+	        &olp) == FALSE) {
+		fclose(f);
+		return (0);
+	}
+#else
+	if (lockf(fileno(f), F_LOCK, 0) != 0) {
+		(void) fclose(f);
+		return (0);
+	}
+#endif
+	if (fscanf(f, "%hu", &port) != 1) {
+		// No port recorded yet, so pick a random starting point
+		// within the configured range.
+		unsigned rnd;
+#ifdef _WIN32
+		(void) rand_s(&rnd);
+#else
+		unsigned seed = (unsigned) time(NULL);
+		rnd           = (unsigned) rand_r(&seed);
+#endif
+		port = (uint16_t)(base + (rnd % (end - base)));
+	}
+	port++;
+	if ((port < base) || (port >= end)) {
+		port = base;
+	}
+
+#ifdef _WIN32
+	fseek(f, 0, SEEK_SET);
+	SetEndOfFile(h);
+	(void) fprintf(f, "%u", port);
+	ZeroMemory(&olp, sizeof(olp));
+	(void) UnlockFileEx(h, 0, MAXDWORD, MAXDWORD, &olp);
+#else
+	fseek(f, 0, SEEK_SET);
+	if (ftruncate(fileno(f), 0) != 0) {
+		(void) fclose(f);
+		return (0);
+	}
+
+	(void) fprintf(f, "%u", port);
+	(void) lockf(fileno(f), F_ULOCK, 0);
+
+#endif
+	(void) fclose(f);
+	return (port);
+}
+
+struct marriage_notice {
+	nng_mtx *mx;
+	nng_cv * cv;
+	int      s1;
+	int      s2;
+	int      cnt1;
+	int      cnt2;
+	nng_pipe p1;
+	nng_pipe p2;
+};
+
+static void
+married(nng_pipe p, nng_pipe_ev ev, void *arg)
+{
+	struct marriage_notice *notice = arg;
+	(void) ev;
+
+	nng_mtx_lock(notice->mx);
+	if (nng_socket_id(nng_pipe_socket(p)) == notice->s1) {
+		notice->cnt1++;
+		notice->p1 = p;
+	} else if (nng_socket_id(nng_pipe_socket(p)) == notice->s2) {
+		notice->cnt2++;
+		notice->p2 = p;
+	}
+	nng_cv_wake(notice->cv);
+	nng_mtx_unlock(notice->mx);
+}
+
+int
+nuts_marry(nng_socket s1, nng_socket s2)
+{
+	return (nuts_marry_ex(s1, s2, NULL, NULL, NULL));
+}
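Editor's note: the address and marriage helpers above are normally used together from a test body. The sketch below is illustrative only (it is not part of this patch); it assumes pair1 sockets and the NUTS_PASS/NUTS_CLOSE macros defined later in nuts.h.

// Illustrative sketch, not part of marry.c: a transport-style test might
// obtain a scratch TCP address and then marry two pair1 sockets over it.
void
example_tcp_marry(void)
{
	nng_socket s1;
	nng_socket s2;
	char       addr[64];

	// Produces something like "tcp://127.0.0.1:38321" via nuts_next_port().
	nuts_scratch_addr("tcp", sizeof(addr), addr);

	NUTS_PASS(nng_pair1_open(&s1));
	NUTS_PASS(nng_pair1_open(&s2));

	// Listens on s1, dials from s2, and waits until both pipes exist.
	NUTS_PASS(nuts_marry_ex(s1, s2, addr, NULL, NULL));

	NUTS_CLOSE(s1);
	NUTS_CLOSE(s2);
}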
+// NB: This function is always called with sufficient space to
+// hold the resulting expansion.
+static void
+replace_port_zero(const char *addr, char *buf, int port)
+{
+	int  i;
+	int  j;
+	bool colon = false;
+	char c;
+
+	for (i = 0, j = 0; (c = addr[i]) != '\0'; i++) {
+
+		if (colon && c == '0') {
+			char num[16];
+			(void) snprintf(num, sizeof(num), "%d", port);
+			memcpy(&buf[j], num, strlen(num));
+			j += (int) strlen(num);
+			colon = false;
+			continue;
+		}
+		colon    = c == ':';
+		buf[j++] = c;
+	}
+	buf[j] = '\0';
+}
+
+int
+nuts_marry_ex(
+    nng_socket s1, nng_socket s2, const char *url, nng_pipe *p1, nng_pipe *p2)
+{
+	struct marriage_notice note;
+	nng_time               timeout;
+	int                    rv;
+	char                   addr[64];
+	nng_listener           l;
+	int                    port;
+
+	if (url == NULL) {
+		(void) snprintf(addr, sizeof(addr),
+		    "inproc://marry%04x%04x%04x%04x", nng_random(),
+		    nng_random(), nng_random(), nng_random());
+		url = addr;
+	}
+
+	// Zero the notice so that the cleanup path below never looks at
+	// uninitialized handles if an early allocation fails.
+	memset(&note, 0, sizeof(note));
+	note.s1 = nng_socket_id(s1);
+	note.s2 = nng_socket_id(s2);
+	timeout = nng_clock() + 1000; // 1 second
+
+	if (((rv = nng_mtx_alloc(&note.mx)) != 0) ||
+	    ((rv = nng_cv_alloc(&note.cv, note.mx)) != 0) ||
+	    ((rv = nng_pipe_notify(
+	          s1, NNG_PIPE_EV_ADD_POST, married, &note)) != 0) ||
+	    ((rv = nng_pipe_notify(
+	          s2, NNG_PIPE_EV_ADD_POST, married, &note)) != 0) ||
+	    ((rv = nng_listen(s1, url, &l, 0)) != 0)) {
+		goto done;
+	}
+
+	// If a TCP port of zero was selected, ask for the port actually
+	// bound, and substitute it into the URL used for dialing.
+	if ((strstr(url, ":0") != NULL) &&
+	    (nng_listener_get_int(l, NNG_OPT_TCP_BOUND_PORT, &port) == 0) &&
+	    (port > 0)) {
+		replace_port_zero(url, addr, port);
+		url = addr;
+	}
+	if ((rv = nng_dial(s2, url, NULL, 0)) != 0) {
+		goto done;
+	}
+
+	nng_mtx_lock(note.mx);
+	while ((note.cnt1 == 0) || (note.cnt2 == 0)) {
+		if ((rv = nng_cv_until(note.cv, timeout)) != 0) {
+			break;
+		}
+	}
+	nng_mtx_unlock(note.mx);
+	if (p1 != NULL) {
+		*p1 = note.p1;
+	}
+	if (p2 != NULL) {
+		*p2 = note.p2;
+	}
+
+done:
+	nng_pipe_notify(s1, NNG_PIPE_EV_ADD_POST, NULL, NULL);
+	nng_pipe_notify(s2, NNG_PIPE_EV_ADD_POST, NULL, NULL);
+	if (note.cv != NULL) {
+		nng_cv_free(note.cv);
+	}
+	if (note.mx != NULL) {
+		nng_mtx_free(note.mx);
+	}
+	return (rv);
+}
diff --git a/src/testing/nuts.h b/src/testing/nuts.h
new file mode 100644
index 00000000..2ed8744c
--- /dev/null
+++ b/src/testing/nuts.h
@@ -0,0 +1,206 @@
+//
+// Copyright 2020 Staysail Systems, Inc.
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+// NUTS - NNG Unit Test Support
+//
+// This is the NNG testing support library. It is used in the NNG
+// project to support the various unit tests. It should not be used
+// in other projects, and no guarantees are made about interface
+// stability, etc.
+
+#ifndef NNG_TESTING_NUTS_H
+#define NNG_TESTING_NUTS_H
+
+#include "acutest.h"
+
+#include 
+#include 
+#include 
+
+// The following headers are provided for test code convenience.
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// nuts_clock returns the current time in milliseconds.
+// The reference clock may be any point in the past (typically since
+// the program started running).
+extern uint64_t nuts_clock(void);
+
+// nuts_poll_fd tests if the given file descriptor polls as readable.
+extern bool nuts_poll_fd(int);
+
+// nuts_be16 converts a 16-bit word between native and big-endian order.
+extern uint16_t nuts_be16(uint16_t);
+
+// nuts_be32 converts a 32-bit double-word between native and big-endian
+// order.
+extern uint32_t nuts_be32(uint32_t);
+
+// nuts_sleep sleeps for the specified number of milliseconds.
+extern void nuts_sleep(int);
+
+// nuts_next_port returns a new port number (presumably unique).
+extern uint16_t nuts_next_port(void);
+
+// nuts_scratch_addr makes a scratch address for the given scheme.
+// The address buffer must be supplied, and the size should be at least
+// 64 bytes to ensure no truncation occurs.
+extern void nuts_scratch_addr(const char *, size_t, char *);
+
+// nuts_marry connects two sockets using inproc. It uses socket
+// pipe hooks to ensure that it does not return before both sockets
+// are fully connected.
+extern int nuts_marry(nng_socket, nng_socket);
+
+// nuts_marry_ex is like nuts_marry, but returns the pipes that
+// were connected, and includes an optional URL. The pipe pointers and the
+// URL may be NULL if not needed. If a port number is part of the URL
+// and is zero (i.e. if the URL contains :0) then listen is done first,
+// and the actual bound port will be used for the client.
+extern int nuts_marry_ex(
+    nng_socket, nng_socket, const char *, nng_pipe *, nng_pipe *);
+
+// nuts_stream_send_start and nuts_stream_recv_start are used
+// to initiate transfers asynchronously. They return a token which can
+// be used with nuts_stream_wait, which will return the result of
+// the operation (0 on success, an NNG error number otherwise).
+extern void *nuts_stream_send_start(nng_stream *, void *, size_t);
+extern void *nuts_stream_recv_start(nng_stream *, void *, size_t);
+extern int   nuts_stream_wait(void *);
+
+// These are TLS certificates. The client and server are signed with the
+// root. The server uses CN 127.0.0.1. Other details are bogus, but
+// designed to prevent accidental use elsewhere.
+extern const char *nuts_server_key;
+extern const char *nuts_server_crt;
+extern const char *nuts_client_key;
+extern const char *nuts_client_crt;
+
+// NUTS_PASS tests that an NNG operation succeeds, and reports the
+// failure (with the NNG error string) if it does not.
+#define NUTS_PASS(cond)                                              \
+	do {                                                         \
+		int result_ = (cond);                                \
+		TEST_CHECK_(result_ == 0, "%s succeeds", #cond);     \
+		TEST_MSG("%s: expected success, got %s (%d)", #cond, \
+		    nng_strerror(result_), result_);                 \
+	} while (0)
+
+// NUTS_FAIL tests that an NNG operation fails with a specific error code.
+#define NUTS_FAIL(cond, expect) \ + do { \ + int result_ = (cond); \ + TEST_CHECK_(result_ == (expect), "%s fails with %s", #cond, \ + nng_strerror(expect)); \ + TEST_MSG("%s: expected %s (%d), got %s (%d)", #cond, \ + nng_strerror(expect), expect, nng_strerror(result_), \ + result_); \ + } while (0) + +#define NUTS_SEND(sock, string) \ + NUTS_PASS(nng_send(sock, string, strlen(string) + 1, 0)) + +#define NUTS_RECV(sock, string) \ + do { \ + char buf_[64]; \ + size_t sz_ = sizeof(buf_); \ + int rv_ = nng_recv(sock, &buf_, &sz_, 0); \ + TEST_CHECK_( \ + rv_ == 0, "nng_recv (%d %s)", rv_, nng_strerror(rv_)); \ + TEST_CHECK_(sz_ == strlen(string) + 1, "length %d want %d", \ + sz_, strlen(string) + 1); \ + buf_[sizeof(buf_) - 1] = '\0'; \ + TEST_CHECK_( \ + strcmp(string, buf_) == 0, "%s == %s", string, buf_); \ + } while (0) + +#define NUTS_MATCH(s1, s2) \ + do { \ + TEST_CHECK_(strcmp(s1, s2) == 0, "%s == %s", s1, s2); \ + } while (0) + +#define NUTS_NULL(x) \ + do { \ + TEST_CHECK_((x) == NULL, "%p == NULL", x); \ + } while (0) + +#define NUTS_ADDR(var, scheme) \ + do { \ + static char nuts_addr_[64]; \ + nuts_scratch_addr(scheme, sizeof(nuts_addr_), nuts_addr_); \ + (var) = nuts_addr_; \ + } while (0) + +#define NUTS_OPEN(sock) NUTS_PASS(nng_pair1_open(&(sock))) + +#define NUTS_CLOSE(sock) NUTS_PASS(nng_close(sock)) + +#define NUTS_SLEEP(ms) nuts_sleep(ms) + +#define NUTS_CLOCK(var) \ + do { \ + (var) = nuts_clock(); \ + } while (0) + +#define NUTS_BEFORE(when) \ + do { \ + uint64_t nuts_t0_ = (when); \ + uint64_t nuts_t1_ = nuts_clock(); \ + TEST_CHECK_(nuts_t1_ < nuts_t0_, \ + "time before, deadline %lld, current %lld, delta %lld", \ + (long long) nuts_t0_, (long long) nuts_t1_, \ + (long long) nuts_t0_ - (long long) nuts_t1_); \ + } while (0) + +#define NUTS_AFTER(when) \ + do { \ + uint64_t nuts_t0_ = (when); \ + uint64_t nuts_t1_ = nuts_clock(); \ + TEST_CHECK_(nuts_t1_ >= nuts_t0_, \ + "time after, deadline %lld, current %lld, delta %lld", \ + (long long) nuts_t0_, (long long) nuts_t1_, \ + (long long) nuts_t0_ - (long long) nuts_t1_); \ + } while (0) + +#define NUTS_MARRY(s1, s2) NUTS_PASS(nuts_marry(s1, s2)) +#define NUTS_MARRY_EX(s1, s2, url, p1, p2) \ + NUTS_PASS(nuts_marry_ex(s1, s2, url, p1, p2)) + +// Redefine some macros from acutest.h for consistency. +#define NUTS_TRUE TEST_CHECK +#define NUTS_ASSERT TEST_ASSERT +#define NUTS_CASE TEST_CASE +#define NUTS_MSG TEST_MSG + +#define NUTS_TESTS TEST_LIST + +#define NUTS_PROTO(x, y) (((x) << 4u) | (y)) + +#ifdef __cplusplus +}; +#endif + +#endif // NNG_TEST_NUTS_H diff --git a/src/testing/streams.c b/src/testing/streams.c new file mode 100644 index 00000000..d718ab76 --- /dev/null +++ b/src/testing/streams.c @@ -0,0 +1,146 @@ +// +// Copyright 2020 Staysail Systems, Inc. +// Copyright 2018 Capitar IT Group BV +// +// This software is supplied under the terms of the MIT License, a +// copy of which should be located in the distribution where this +// file was obtained (LICENSE.txt). A copy of the license may also be +// found online at https://opensource.org/licenses/MIT. 
+//
+
+#define TEST_NO_MAIN
+
+#include 
+#include 
+
+typedef struct {
+	uint8_t *   base;
+	size_t      rem;
+	nng_iov     iov;
+	nng_aio *   upper_aio;
+	nng_aio *   lower_aio;
+	nng_stream *s;
+	void (*submit)(nng_stream *, nng_aio *);
+} stream_xfr_t;
+
+static void
+stream_xfr_free(stream_xfr_t *x)
+{
+	if (x == NULL) {
+		return;
+	}
+	if (x->upper_aio != NULL) {
+		nng_aio_free(x->upper_aio);
+	}
+	if (x->lower_aio != NULL) {
+		nng_aio_free(x->lower_aio);
+	}
+	nng_free(x, sizeof(*x));
+}
+
+static void
+stream_xfr_start(stream_xfr_t *x)
+{
+	nng_iov iov;
+	iov.iov_buf = x->base;
+	iov.iov_len = x->rem;
+
+	nng_aio_set_iov(x->lower_aio, 1, &iov);
+	x->submit(x->s, x->lower_aio);
+}
+
+static void
+stream_xfr_cb(void *arg)
+{
+	stream_xfr_t *x = arg;
+	int           rv;
+	size_t        n;
+
+	rv = nng_aio_result(x->lower_aio);
+	if (rv != 0) {
+		nng_aio_finish(x->upper_aio, rv);
+		return;
+	}
+	n = nng_aio_count(x->lower_aio);
+
+	x->rem -= n;
+	x->base += n;
+
+	if (x->rem == 0) {
+		nng_aio_finish(x->upper_aio, 0);
+		return;
+	}
+
+	stream_xfr_start(x);
+}
+
+static stream_xfr_t *
+stream_xfr_alloc(nng_stream *s, void (*submit)(nng_stream *, nng_aio *),
+    void *buf, size_t size)
+{
+	stream_xfr_t *x;
+
+	// Allocate the transfer tracker itself (not the caller's buffer),
+	// and make sure the aio pointers are safe to free on error.
+	if ((x = nng_alloc(sizeof(*x))) == NULL) {
+		return (NULL);
+	}
+	x->upper_aio = NULL;
+	x->lower_aio = NULL;
+	if (nng_aio_alloc(&x->upper_aio, NULL, NULL) != 0) {
+		stream_xfr_free(x);
+		return (NULL);
+	}
+	if (nng_aio_alloc(&x->lower_aio, stream_xfr_cb, x) != 0) {
+		stream_xfr_free(x);
+		return (NULL);
+	}
+
+	// Upper should not take more than 30 seconds, lower not more than 5.
+	nng_aio_set_timeout(x->upper_aio, 30000);
+	nng_aio_set_timeout(x->lower_aio, 5000);
+
+	nng_aio_begin(x->upper_aio);
+
+	x->s      = s;
+	x->rem    = size;
+	x->base   = buf;
+	x->submit = submit;
+
+	return (x);
+}
+
+int
+nuts_stream_wait(stream_xfr_t *x)
+{
+	int rv;
+	if (x == NULL) {
+		return (NNG_ENOMEM);
+	}
+	nng_aio_wait(x->upper_aio);
+	rv = nng_aio_result(x->upper_aio);
+	stream_xfr_free(x);
+	return (rv);
+}
+
+void *
+nuts_stream_recv_start(nng_stream *s, void *buf, size_t size)
+{
+	stream_xfr_t *x;
+
+	x = stream_xfr_alloc(s, nng_stream_recv, buf, size);
+	if (x == NULL) {
+		return (x);
+	}
+	stream_xfr_start(x);
+	return (x);
+}
+
+void *
+nuts_stream_send_start(nng_stream *s, void *buf, size_t size)
+{
+	stream_xfr_t *x;
+
+	x = stream_xfr_alloc(s, nng_stream_send, buf, size);
+	if (x == NULL) {
+		return (x);
+	}
+	stream_xfr_start(x);
+	return (x);
+}
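Editor's note: these stream helpers are meant to be driven in pairs from a test. The following sketch is illustrative only (not part of the patch); it assumes two already-connected nng_stream objects and that string.h and nuts.h are available.

// Illustrative sketch, not part of streams.c: push a buffer through one
// stream while reading it back from the other, then compare the contents.
void
example_stream_xfer(nng_stream *tx, nng_stream *rx)
{
	char  txb[1024];
	char  rxb[1024];
	void *st;
	void *rt;

	memset(txb, 'A', sizeof(txb));

	// Start both directions, then wait; each wait returns 0 on
	// success or an NNG error code.
	st = nuts_stream_send_start(tx, txb, sizeof(txb));
	rt = nuts_stream_recv_start(rx, rxb, sizeof(rxb));
	NUTS_PASS(nuts_stream_wait(st));
	NUTS_PASS(nuts_stream_wait(rt));
	NUTS_TRUE(memcmp(txb, rxb, sizeof(txb)) == 0);
}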
diff --git a/src/testing/util.c b/src/testing/util.c
new file mode 100644
index 00000000..eeb70b4f
--- /dev/null
+++ b/src/testing/util.c
@@ -0,0 +1,164 @@
+//
+// Copyright 2020 Staysail Systems, Inc.
+// Copyright 2018 Capitar IT Group BV
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#define TEST_NO_MAIN
+
+#ifdef _WIN32
+
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
+
+#include 
+#include 
+// order counts
+#include 
+#define poll WSAPoll
+#include 
+#else
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#endif
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#if !defined(_WIN32) && !defined(CLOCK_MONOTONIC)
+#include 
+#endif
+
+#include "nuts.h"
+
+uint64_t
+nuts_clock(void)
+{
+#ifdef _WIN32
+	return (GetTickCount64());
+#elif defined(CLOCK_MONOTONIC)
+	struct timespec ts;
+	clock_gettime(CLOCK_MONOTONIC, &ts);
+	uint64_t val;
+
+	val = ts.tv_sec;
+	val *= 1000;
+	val += ts.tv_nsec / 1000000;
+	return (val);
+#else
+	static time_t  epoch;
+	struct timeval tv;
+
+	if (epoch == 0) {
+		epoch = time(NULL);
+	}
+	gettimeofday(&tv, NULL);
+
+	if (tv.tv_sec < epoch) {
+		// Broken clock.
+		// This will force all other timing tests to fail.
+		return (0);
+	}
+	tv.tv_sec -= epoch;
+	return (
+	    ((uint64_t)(tv.tv_sec) * 1000) + (uint64_t)(tv.tv_usec / 1000));
+#endif
+}
+
+bool
+nuts_poll_fd(int fd)
+{
+#ifdef _WIN32
+	struct pollfd pfd;
+	pfd.fd      = (SOCKET) fd;
+	pfd.events  = POLLRDNORM;
+	pfd.revents = 0;
+
+	switch (WSAPoll(&pfd, 1, 0)) {
+	case 0:
+		return (false);
+	case 1:
+		return (true);
+	}
+#else
+	struct pollfd pfd;
+
+	pfd.fd      = fd;
+	pfd.events  = POLLRDNORM;
+	pfd.revents = 0;
+
+	switch (poll(&pfd, 1, 0)) {
+	case 0:
+		return (false);
+	case 1:
+		return (true);
+	}
+#endif
+	return (false);
+}
+
+static bool
+is_little_endian(void)
+{
+	uint16_t num = 0x1;
+	uint8_t *ptr = (uint8_t *) (void *) (&num);
+	return (ptr[0] == 1);
+}
+
+uint16_t
+nuts_be16(uint16_t in)
+{
+	if (is_little_endian()) {
+		in = ((in / 0x100) + ((in % 0x100) * 0x100));
+	}
+	return (in);
+}
+
+uint32_t
+nuts_be32(uint32_t in)
+{
+	if (is_little_endian()) {
+		in = ((in >> 24u) & 0xffu) | ((in >> 8u) & 0xff00u) |
+		    ((in << 8u) & 0xff0000u) | ((in << 24u) & 0xff000000u);
+	}
+	return (in);
+}
+
+void
+nuts_sleep(int msec)
+{
+#ifdef _WIN32
+	Sleep(msec);
+#elif defined(CLOCK_MONOTONIC)
+	struct timespec ts;
+
+	ts.tv_sec  = msec / 1000;
+	ts.tv_nsec = (msec % 1000) * 1000000;
+
+	// Do this in a loop, so that interrupts don't actually wake us.
+	while (ts.tv_sec || ts.tv_nsec) {
+		if (nanosleep(&ts, &ts) == 0) {
+			break;
+		}
+	}
+#else
+	poll(NULL, 0, msec);
+#endif
+}
--
cgit v1.2.3-70-g09d2
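Editor's note: taken together, a complete test file built on this framework would look roughly like the sketch below. It is illustrative only and not part of the patch; the pair1 protocol header path and the test name are assumptions.

// Illustrative sketch: a standalone NUTS-based test program. Because
// TEST_NO_MAIN is not defined here, acutest.h supplies main().
#include <nng/nng.h>
#include <nng/protocol/pair1/pair.h>

#include "nuts.h"

static void
test_pair_exchange(void)
{
	nng_socket s1;
	nng_socket s2;

	NUTS_OPEN(s1);      // nng_pair1_open(&s1), checked with NUTS_PASS
	NUTS_OPEN(s2);
	NUTS_MARRY(s1, s2); // connect over inproc, wait for both pipes
	NUTS_SEND(s1, "hello");
	NUTS_RECV(s2, "hello");
	NUTS_CLOSE(s1);
	NUTS_CLOSE(s2);
}

NUTS_TESTS = {
	{ "pair exchange", test_pair_exchange },
	{ NULL, NULL },
};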