diff options
author | Marat Dukhan <maratek@google.com> | 2020-03-26 14:19:30 -0700 |
---|---|---|
committer | Marat Dukhan <maratek@google.com> | 2020-03-26 14:19:30 -0700 |
commit | 76042155a8b1e189c8f141429fd72219472c32e1 (patch) | |
tree | 0fc182011d2fe74c23115b411181b0c0f1dbe236 | |
parent | 6469659dd404768fd80f1989dfd66930a6587bf8 (diff) | |
download | pthreadpool-76042155a8b1e189c8f141429fd72219472c32e1.tar.gz |
Microarchitecture-aware parallelization functions
-rw-r--r-- | CMakeLists.txt | 32 | ||||
-rw-r--r-- | cmake/DownloadCpuinfo.cmake | 15 | ||||
-rw-r--r-- | include/pthreadpool.h | 239 | ||||
-rw-r--r-- | src/threadpool-pthreads.c | 424 | ||||
-rw-r--r-- | src/threadpool-shim.c | 83 | ||||
-rw-r--r-- | test/pthreadpool.cc | 1554 |
6 files changed, 2337 insertions, 10 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt index 714325a..79a17a1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -39,6 +39,21 @@ IF(NOT DEFINED FXDIV_SOURCE_DIR) SET(FXDIV_SOURCE_DIR "${CMAKE_BINARY_DIR}/FXdiv-source" CACHE STRING "FXdiv source directory") ENDIF() +IF(CMAKE_SYSTEM_NAME MATCHES "^(Linux|Android)$" AND CMAKE_SYSTEM_PROCESSOR MATCHES "^(armv[5-8].*|aarch64)$") + IF(NOT DEFINED CPUINFO_SOURCE_DIR) + MESSAGE(STATUS "Downloading cpuinfo to ${CMAKE_BINARY_DIR}/cpuinfo-source (define CPUINFO_SOURCE_DIR to avoid it)") + CONFIGURE_FILE(cmake/DownloadCpuinfo.cmake "${CMAKE_BINARY_DIR}/cpuinfo-download/CMakeLists.txt") + EXECUTE_PROCESS(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" . + WORKING_DIRECTORY "${CMAKE_BINARY_DIR}/cpuinfo-download") + EXECUTE_PROCESS(COMMAND "${CMAKE_COMMAND}" --build . + WORKING_DIRECTORY "${CMAKE_BINARY_DIR}/cpuinfo-download") + SET(CPUINFO_SOURCE_DIR "${CMAKE_BINARY_DIR}/cpuinfo-source" CACHE STRING "cpuinfo source directory") + ENDIF() + SET(PTHREADPOOL_USE_CPUINFO ON) +ELSE() + SET(PTHREADPOOL_USE_CPUINFO OFF) +ENDIF() + IF(PTHREADPOOL_BUILD_TESTS AND NOT DEFINED GOOGLETEST_SOURCE_DIR) MESSAGE(STATUS "Downloading Google Test to ${CMAKE_BINARY_DIR}/googletest-source (define GOOGLETEST_SOURCE_DIR to avoid it)") CONFIGURE_FILE(cmake/DownloadGoogleTest.cmake "${CMAKE_BINARY_DIR}/googletest-download/CMakeLists.txt") @@ -122,6 +137,23 @@ IF(NOT TARGET fxdiv) ENDIF() TARGET_LINK_LIBRARIES(pthreadpool PRIVATE fxdiv) +# ---[ Configure cpuinfo +IF(PTHREADPOOL_USE_CPUINFO) + IF(NOT TARGET cpuinfo) + SET(CPUINFO_BUILD_TOOLS OFF CACHE BOOL "") + SET(CPUINFO_BUILD_UNIT_TESTS OFF CACHE BOOL "") + SET(CPUINFO_BUILD_MOCK_TESTS OFF CACHE BOOL "") + SET(CPUINFO_BUILD_BENCHMARKS OFF CACHE BOOL "") + ADD_SUBDIRECTORY( + "${CPUINFO_SOURCE_DIR}" + "${CMAKE_BINARY_DIR}/cpuinfo") + ENDIF() + TARGET_LINK_LIBRARIES(pthreadpool PRIVATE cpuinfo) + TARGET_COMPILE_DEFINITIONS(pthreadpool PRIVATE PTHREADPOOL_USE_CPUINFO=1) +ELSE() + 
TARGET_COMPILE_DEFINITIONS(pthreadpool PRIVATE PTHREADPOOL_USE_CPUINFO=0) +ENDIF() + INSTALL(TARGETS pthreadpool LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) diff --git a/cmake/DownloadCpuinfo.cmake b/cmake/DownloadCpuinfo.cmake new file mode 100644 index 0000000..25213a0 --- /dev/null +++ b/cmake/DownloadCpuinfo.cmake @@ -0,0 +1,15 @@ +CMAKE_MINIMUM_REQUIRED(VERSION 3.5 FATAL_ERROR) + +PROJECT(cpuinfo-download NONE) + +INCLUDE(ExternalProject) +ExternalProject_Add(cpuinfo + URL https://github.com/pytorch/cpuinfo/archive/0cc563acb9baac39f2c1349bc42098c4a1da59e3.tar.gz + URL_HASH SHA256=80625d0b69a3d69b70c2236f30db2c542d0922ccf9bb51a61bc39c49fac91a35 + SOURCE_DIR "${CMAKE_BINARY_DIR}/cpuinfo-source" + BINARY_DIR "${CMAKE_BINARY_DIR}/cpuinfo" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) diff --git a/include/pthreadpool.h b/include/pthreadpool.h index 4d21528..de4016b 100644 --- a/include/pthreadpool.h +++ b/include/pthreadpool.h @@ -16,6 +16,11 @@ typedef void (*pthreadpool_task_4d_tile_2d_t)(void*, size_t, size_t, size_t, siz typedef void (*pthreadpool_task_5d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t); typedef void (*pthreadpool_task_6d_tile_2d_t)(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_1d_with_id_t)(void*, uint32_t, size_t); +typedef void (*pthreadpool_task_2d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_3d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t, size_t); +typedef void (*pthreadpool_task_4d_tile_2d_with_id_t)(void*, uint32_t, size_t, size_t, size_t, size_t, size_t, size_t); + /** * Disable support for denormalized numbers to the maximum extent possible for @@ -103,6 +108,52 @@ void pthreadpool_parallelize_1d( uint32_t flags); /** + * Process items on a 1D grid using a microarchitecture-aware task 
function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range; i++) + * function(context, uarch_index, i); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each item. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range the number of items on the 1D grid to process. + * The specified function will be called once for each item. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_1d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_1d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range, + uint32_t flags); + +/** * Process items on a 1D grid with specified maximum tile size. 
* * The function implements a parallel version of the following snippet: @@ -249,6 +300,66 @@ void pthreadpool_parallelize_2d_tile_2d( uint32_t flags); /** + * Process items on a 2D grid with the specified maximum tile size along each + * grid dimension using a microarchitecture-aware task function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i += tile_i) + * for (size_t j = 0; j < range_j; j += tile_j) + * function(context, uarch_index, i, j, + * min(range_i - i, tile_i), min(range_j - j, tile_j)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, + * cpuinfo initialization failed, or index returned + * by cpuinfo_get_current_uarch_index() exceeds + * the max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected + * by the specified function. If the index returned + * by cpuinfo_get_current_uarch_index() exceeds this + * value, default_uarch_index will be used instead. + * default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first + * dimension of the 2D grid. 
+ * @param range_j the number of items to process along the second + * dimension of the 2D grid. + * @param tile_i the maximum number of items along the first + * dimension of the 2D grid to process in one function call. + * @param tile_j the maximum number of items along the second + * dimension of the 2D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_2d_tile_2d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_2d_tile_2d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t tile_i, + size_t tile_j, + uint32_t flags); + +/** * Process items on a 3D grid with the specified maximum tile size along the * last two grid dimensions. * @@ -295,6 +406,68 @@ void pthreadpool_parallelize_3d_tile_2d( uint32_t flags); /** + * Process items on a 3D grid with the specified maximum tile size along the + * last two grid dimensions using a microarchitecture-aware task function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j += tile_j) + * for (size_t k = 0; k < range_k; k += tile_k) + * function(context, uarch_index, i, j, k, + * min(range_j - j, tile_j), min(range_k - k, tile_k)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. 
If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first + * dimension of the 3D grid. + * @param range_j the number of items to process along the second + * dimension of the 3D grid. + * @param range_k the number of items to process along the third + * dimension of the 3D grid. + * @param tile_j the maximum number of items along the second + * dimension of the 3D grid to process in one function call. + * @param tile_k the maximum number of items along the third + * dimension of the 3D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_3d_tile_2d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_2d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_j, + size_t tile_k, + uint32_t flags); + +/** * Process items on a 4D grid with the specified maximum tile size along the * last two grid dimensions. 
* @@ -345,6 +518,72 @@ void pthreadpool_parallelize_4d_tile_2d( uint32_t flags); /** + * Process items on a 4D grid with the specified maximum tile size along the + * last two grid dimensions using a microarchitecture-aware task function. + * + * The function implements a parallel version of the following snippet: + * + * uint32_t uarch_index = cpuinfo_initialize() ? + * cpuinfo_get_current_uarch_index() : default_uarch_index; + * if (uarch_index > max_uarch_index) uarch_index = default_uarch_index; + * for (size_t i = 0; i < range_i; i++) + * for (size_t j = 0; j < range_j; j++) + * for (size_t k = 0; k < range_k; k += tile_k) + * for (size_t l = 0; l < range_l; l += tile_l) + * function(context, uarch_index, i, j, k, l, + * min(range_k - k, tile_k), min(range_l - l, tile_l)); + * + * When the function returns, all items have been processed and the thread pool + * is ready for a new task. + * + * @note If multiple threads call this function with the same thread pool, the + * calls are serialized. + * + * @param threadpool the thread pool to use for parallelisation. If + * threadpool is NULL, all items are processed serially on the calling + * thread. + * @param function the function to call for each tile. + * @param context the first argument passed to the specified + * function. + * @param default_uarch_index the microarchitecture index to use when + * pthreadpool is configured without cpuinfo, cpuinfo initialization failed, + * or index returned by cpuinfo_get_current_uarch_index() exceeds the + * max_uarch_index value. + * @param max_uarch_index the maximum microarchitecture index expected by + * the specified function. If the index returned by + * cpuinfo_get_current_uarch_index() exceeds this value, default_uarch_index + * will be used instead. default_uarch_index can exceed max_uarch_index. + * @param range_i the number of items to process along the first + * dimension of the 4D grid. 
+ * @param range_j the number of items to process along the second + * dimension of the 4D grid. + * @param range_k the number of items to process along the third + * dimension of the 4D grid. + * @param range_l the number of items to process along the fourth + * dimension of the 4D grid. + * @param tile_k the maximum number of items along the third + * dimension of the 4D grid to process in one function call. + * @param tile_l the maximum number of items along the fourth + * dimension of the 4D grid to process in one function call. + * @param flags a bitwise combination of zero or more optional + * flags (PTHREADPOOL_FLAG_DISABLE_DENORMALS or + * PTHREADPOOL_FLAG_YIELD_WORKERS) + */ +void pthreadpool_parallelize_4d_tile_2d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_4d_tile_2d_with_id_t function, + void* context, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t tile_k, + size_t tile_l, + uint32_t flags); + +/** * Process items on a 5D grid with the specified maximum tile size along the * last two grid dimensions. 
* diff --git a/src/threadpool-pthreads.c b/src/threadpool-pthreads.c index 6ebd521..0a9c06d 100644 --- a/src/threadpool-pthreads.c +++ b/src/threadpool-pthreads.c @@ -9,6 +9,10 @@ #include <pthread.h> #include <unistd.h> +#ifndef PTHREADPOOL_USE_CPUINFO + #define PTHREADPOOL_USE_CPUINFO 0 +#endif + #ifndef PTHREADPOOL_USE_FUTEX #if defined(__linux__) #define PTHREADPOOL_USE_FUTEX 1 @@ -19,6 +23,10 @@ #endif #endif +#if PTHREADPOOL_USE_CPUINFO + #include <cpuinfo.h> +#endif + /* Futex-specific headers */ #if PTHREADPOOL_USE_FUTEX #if defined(__linux__) @@ -164,6 +172,17 @@ struct PTHREADPOOL_CACHELINE_ALIGNED thread_info { PTHREADPOOL_STATIC_ASSERT(sizeof(struct thread_info) % PTHREADPOOL_CACHELINE_SIZE == 0, "thread_info structure must occupy an integer number of cache lines (64 bytes)"); +struct pthreadpool_1d_with_uarch_params { + /** + * Copy of the default uarch index argument passed to a microarchitecture-aware parallelization function. + */ + uint32_t default_uarch_index; + /** + * Copy of the max uarch index argument passed to a microarchitecture-aware parallelization function. + */ + uint32_t max_uarch_index; +}; + struct PTHREADPOOL_CACHELINE_ALIGNED pthreadpool { /** * The number of threads that are processing an operation. @@ -195,6 +214,13 @@ struct PTHREADPOOL_CACHELINE_ALIGNED pthreadpool { */ pthreadpool_atomic_void_p argument; /** + * Additional parallelization parameters. + * These parameters are specific for each thread_function. + */ + union { + struct pthreadpool_1d_with_uarch_params parallelize_1d_with_uarch; + } params; + /** * Copy of the flags passed to a parallelization function. */ pthreadpool_atomic_uint32_t flags; @@ -220,8 +246,14 @@ struct PTHREADPOOL_CACHELINE_ALIGNED pthreadpool { */ pthread_cond_t command_condvar; #endif +#if PTHREADPOOL_USE_CPUINFO /** - * The number of threads in the thread pool. Never changes after initialization. + * Indication whether cpuinfo library initialized successfully. 
Never changes after pthreadpool_create. + */ + bool cpuinfo_is_initialized; +#endif + /** + * The number of threads in the thread pool. Never changes after pthreadpool_create. */ size_t threads_count; /** @@ -356,6 +388,45 @@ static void thread_parallelize_1d(struct pthreadpool* threadpool, struct thread_ pthreadpool_fence_release(); } +static void thread_parallelize_1d_with_uarch(struct pthreadpool* threadpool, struct thread_info* thread) { + const pthreadpool_task_1d_with_id_t task = (pthreadpool_task_1d_with_id_t) pthreadpool_load_relaxed_void_p(&threadpool->task); + void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument); + + const uint32_t default_uarch_index = threadpool->params.parallelize_1d_with_uarch.default_uarch_index; + uint32_t uarch_index = default_uarch_index; + #if PTHREADPOOL_USE_CPUINFO + if (threadpool && threadpool->cpuinfo_is_initialized) { + uarch_index = cpuinfo_get_current_uarch_index(); + if (uarch_index > threadpool->params.parallelize_1d_with_uarch.max_uarch_index) { + uarch_index = default_uarch_index; + } + } + #endif + + /* Process thread's own range of items */ + size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start); + while (atomic_decrement(&thread->range_length)) { + task(argument, uarch_index, range_start++); + } + + /* There still may be other threads with work */ + const size_t thread_number = thread->thread_number; + const size_t threads_count = threadpool->threads_count; + for (size_t tid = modulo_decrement(thread_number, threads_count); + tid != thread_number; + tid = modulo_decrement(tid, threads_count)) + { + struct thread_info* other_thread = &threadpool->threads[tid]; + while (atomic_decrement(&other_thread->range_length)) { + const size_t item_id = pthreadpool_fetch_sub_relaxed_size_t(&other_thread->range_end, 1) - 1; + task(argument, uarch_index, item_id); + } + } + + /* Make changes by this thread visible to other threads */ + pthreadpool_fence_release(); +} + static uint32_t 
wait_for_new_command( struct pthreadpool* threadpool, uint32_t last_command, @@ -501,6 +572,9 @@ struct pthreadpool* pthreadpool_create(size_t threads_count) { for (size_t tid = 0; tid < threads_count; tid++) { threadpool->threads[tid].thread_number = tid; } + #if PTHREADPOOL_USE_CPUINFO + threadpool->cpuinfo_is_initialized = cpuinfo_initialize(); + #endif /* Thread pool with a single thread computes everything on the caller thread. */ if (threads_count > 1) { @@ -539,6 +613,8 @@ size_t pthreadpool_get_threads_count(struct pthreadpool* threadpool) { static void pthreadpool_parallelize( struct pthreadpool* threadpool, thread_function_t thread_function, + const void* params, + size_t params_size, void* task, void* context, size_t linear_range, @@ -570,6 +646,11 @@ static void pthreadpool_parallelize( pthreadpool_store_relaxed_uint32_t(&threadpool->has_active_threads, 1); #endif + if (params_size != 0) { + memcpy(&threadpool->params, params, params_size); + pthreadpool_fence_release(); + } + /* Spread the work between threads */ size_t range_start = 0; for (size_t tid = 0; tid < threads_count; tid++) { @@ -659,11 +740,55 @@ void pthreadpool_parallelize_1d( } } else { pthreadpool_parallelize( - threadpool, &thread_parallelize_1d, + threadpool, &thread_parallelize_1d, NULL, 0, (void*) task, argument, range, flags); } } +void pthreadpool_parallelize_1d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_1d_with_id_t task, + void* argument, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range, + uint32_t flags) +{ + if (threadpool == NULL || threadpool->threads_count <= 1 || range <= 1) { + /* No thread pool used: execute task sequentially on the calling thread */ + + uint32_t uarch_index = default_uarch_index; + #if PTHREADPOOL_USE_CPUINFO + if (threadpool && threadpool->cpuinfo_is_initialized) { + uarch_index = cpuinfo_get_current_uarch_index(); + if (uarch_index > max_uarch_index) { + uarch_index = default_uarch_index; + } + } + #endif + + 
struct fpu_state saved_fpu_state = { 0 }; + if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) { + saved_fpu_state = get_fpu_state(); + disable_fpu_denormals(); + } + for (size_t i = 0; i < range; i++) { + task(argument, uarch_index, i); + } + if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) { + set_fpu_state(saved_fpu_state); + } + } else { + const struct pthreadpool_1d_with_uarch_params params = { + .default_uarch_index = default_uarch_index, + .max_uarch_index = max_uarch_index, + }; + pthreadpool_parallelize( + threadpool, &thread_parallelize_1d_with_uarch, &params, sizeof(params), + task, argument, range, flags); + } +} + struct compute_1d_tile_1d_context { pthreadpool_task_1d_tile_1d_t task; void* argument; @@ -709,7 +834,7 @@ void pthreadpool_parallelize_1d_tile_1d( .tile = tile }; pthreadpool_parallelize( - threadpool, &thread_parallelize_1d, + threadpool, &thread_parallelize_1d, NULL, 0, (void*) compute_1d_tile_1d, &context, tile_range, flags); } } @@ -757,7 +882,7 @@ void pthreadpool_parallelize_2d( .range_j = fxdiv_init_size_t(range_j) }; pthreadpool_parallelize( - threadpool, &thread_parallelize_1d, + threadpool, &thread_parallelize_1d, NULL, 0, (void*) compute_2d, &context, range_i * range_j, flags); } } @@ -817,7 +942,7 @@ void pthreadpool_parallelize_2d_tile_1d( .tile_j = tile_j }; pthreadpool_parallelize( - threadpool, &thread_parallelize_1d, + threadpool, &thread_parallelize_1d, NULL, 0, (void*) compute_2d_tile_1d, &context, range_i * tile_range_j, flags); } } @@ -883,11 +1008,94 @@ void pthreadpool_parallelize_2d_tile_2d( .tile_j = tile_j }; pthreadpool_parallelize( - threadpool, &thread_parallelize_1d, + threadpool, &thread_parallelize_1d, NULL, 0, (void*) compute_2d_tile_2d, &context, tile_range_i * tile_range_j, flags); } } +struct compute_2d_tile_2d_with_uarch_context { + pthreadpool_task_2d_tile_2d_with_id_t task; + void* argument; + struct fxdiv_divisor_size_t tile_range_j; + size_t range_i; + size_t range_j; + size_t tile_i; + size_t tile_j; +}; + 
+static void compute_2d_tile_2d_with_uarch(const struct compute_2d_tile_2d_with_uarch_context* context, uint32_t uarch_index, size_t linear_index) { + const struct fxdiv_divisor_size_t tile_range_j = context->tile_range_j; + const struct fxdiv_result_size_t tile_index = fxdiv_divide_size_t(linear_index, tile_range_j); + const size_t max_tile_i = context->tile_i; + const size_t max_tile_j = context->tile_j; + const size_t index_i = tile_index.quotient * max_tile_i; + const size_t index_j = tile_index.remainder * max_tile_j; + const size_t tile_i = min(max_tile_i, context->range_i - index_i); + const size_t tile_j = min(max_tile_j, context->range_j - index_j); + context->task(context->argument, uarch_index, index_i, index_j, tile_i, tile_j); +} + +void pthreadpool_parallelize_2d_tile_2d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_2d_tile_2d_with_id_t task, + void* argument, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t tile_i, + size_t tile_j, + uint32_t flags) +{ + if (threadpool == NULL || threadpool->threads_count <= 1 || (range_i <= tile_i && range_j <= tile_j)) { + /* No thread pool used: execute task sequentially on the calling thread */ + + uint32_t uarch_index = default_uarch_index; + #if PTHREADPOOL_USE_CPUINFO + if (threadpool && threadpool->cpuinfo_is_initialized) { + uarch_index = cpuinfo_get_current_uarch_index(); + if (uarch_index > max_uarch_index) { + uarch_index = default_uarch_index; + } + } + #endif + + struct fpu_state saved_fpu_state = { 0 }; + if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) { + saved_fpu_state = get_fpu_state(); + disable_fpu_denormals(); + } + for (size_t i = 0; i < range_i; i += tile_i) { + for (size_t j = 0; j < range_j; j += tile_j) { + task(argument, uarch_index, i, j, min(range_i - i, tile_i), min(range_j - j, tile_j)); + } + } + if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) { + set_fpu_state(saved_fpu_state); + } + } else { + /* Execute in 
parallel on the thread pool using linearized index */ + const size_t tile_range_i = divide_round_up(range_i, tile_i); + const size_t tile_range_j = divide_round_up(range_j, tile_j); + const struct pthreadpool_1d_with_uarch_params params = { + .default_uarch_index = default_uarch_index, + .max_uarch_index = max_uarch_index, + }; + struct compute_2d_tile_2d_with_uarch_context context = { + .task = task, + .argument = argument, + .tile_range_j = fxdiv_init_size_t(tile_range_j), + .range_i = range_i, + .range_j = range_j, + .tile_i = tile_i, + .tile_j = tile_j + }; + pthreadpool_parallelize( + threadpool, &thread_parallelize_1d_with_uarch, &params, sizeof(params), + (void*) compute_2d_tile_2d_with_uarch, &context, tile_range_i * tile_range_j, flags); + } +} + struct compute_3d_tile_2d_context { pthreadpool_task_3d_tile_2d_t task; void* argument; @@ -957,11 +1165,102 @@ void pthreadpool_parallelize_3d_tile_2d( .tile_k = tile_k }; pthreadpool_parallelize( - threadpool, &thread_parallelize_1d, + threadpool, &thread_parallelize_1d, NULL, 0, (void*) compute_3d_tile_2d, &context, range_i * tile_range_j * tile_range_k, flags); } } +struct compute_3d_tile_2d_with_uarch_context { + pthreadpool_task_3d_tile_2d_with_id_t task; + void* argument; + struct fxdiv_divisor_size_t tile_range_j; + struct fxdiv_divisor_size_t tile_range_k; + size_t range_j; + size_t range_k; + size_t tile_j; + size_t tile_k; +}; + +static void compute_3d_tile_2d_with_uarch(const struct compute_3d_tile_2d_with_uarch_context* context, uint32_t uarch_index, size_t linear_index) { + const struct fxdiv_divisor_size_t tile_range_k = context->tile_range_k; + const struct fxdiv_result_size_t tile_index_ij_k = fxdiv_divide_size_t(linear_index, tile_range_k); + const struct fxdiv_divisor_size_t tile_range_j = context->tile_range_j; + const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(tile_index_ij_k.quotient, tile_range_j); + const size_t max_tile_j = context->tile_j; + const size_t max_tile_k = 
context->tile_k; + const size_t index_i = tile_index_i_j.quotient; + const size_t index_j = tile_index_i_j.remainder * max_tile_j; + const size_t index_k = tile_index_ij_k.remainder * max_tile_k; + const size_t tile_j = min(max_tile_j, context->range_j - index_j); + const size_t tile_k = min(max_tile_k, context->range_k - index_k); + context->task(context->argument, uarch_index, index_i, index_j, index_k, tile_j, tile_k); +} + +void pthreadpool_parallelize_3d_tile_2d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_2d_with_id_t task, + void* argument, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_j, + size_t tile_k, + uint32_t flags) +{ + if (threadpool == NULL || threadpool->threads_count <= 1 || (range_i <= 1 && range_j <= tile_j && range_k <= tile_k)) { + /* No thread pool used: execute task sequentially on the calling thread */ + + uint32_t uarch_index = default_uarch_index; + #if PTHREADPOOL_USE_CPUINFO + if (threadpool && threadpool->cpuinfo_is_initialized) { + uarch_index = cpuinfo_get_current_uarch_index(); + if (uarch_index > max_uarch_index) { + uarch_index = default_uarch_index; + } + } + #endif + + struct fpu_state saved_fpu_state = { 0 }; + if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) { + saved_fpu_state = get_fpu_state(); + disable_fpu_denormals(); + } + for (size_t i = 0; i < range_i; i++) { + for (size_t j = 0; j < range_j; j += tile_j) { + for (size_t k = 0; k < range_k; k += tile_k) { + task(argument, uarch_index, i, j, k, min(range_j - j, tile_j), min(range_k - k, tile_k)); + } + } + } + if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) { + set_fpu_state(saved_fpu_state); + } + } else { + /* Execute in parallel on the thread pool using linearized index */ + const size_t tile_range_j = divide_round_up(range_j, tile_j); + const size_t tile_range_k = divide_round_up(range_k, tile_k); + const struct pthreadpool_1d_with_uarch_params params = { + 
.default_uarch_index = default_uarch_index, + .max_uarch_index = max_uarch_index, + }; + struct compute_3d_tile_2d_with_uarch_context context = { + .task = task, + .argument = argument, + .tile_range_j = fxdiv_init_size_t(tile_range_j), + .tile_range_k = fxdiv_init_size_t(tile_range_k), + .range_j = range_j, + .range_k = range_k, + .tile_j = tile_j, + .tile_k = tile_k + }; + pthreadpool_parallelize( + threadpool, &thread_parallelize_1d_with_uarch, &params, sizeof(params), + (void*) compute_3d_tile_2d_with_uarch, &context, range_i * tile_range_j * tile_range_k, flags); + } +} + struct compute_4d_tile_2d_context { pthreadpool_task_4d_tile_2d_t task; void* argument; @@ -1040,11 +1339,111 @@ void pthreadpool_parallelize_4d_tile_2d( .tile_l = tile_l }; pthreadpool_parallelize( - threadpool, &thread_parallelize_1d, + threadpool, &thread_parallelize_1d, NULL, 0, (void*) compute_4d_tile_2d, &context, range_i * range_j * tile_range_k * tile_range_l, flags); } } +struct compute_4d_tile_2d_with_uarch_context { + pthreadpool_task_4d_tile_2d_with_id_t task; + void* argument; + struct fxdiv_divisor_size_t tile_range_kl; + struct fxdiv_divisor_size_t range_j; + struct fxdiv_divisor_size_t tile_range_l; + size_t range_k; + size_t range_l; + size_t tile_k; + size_t tile_l; +}; + +static void compute_4d_tile_2d_with_uarch(const struct compute_4d_tile_2d_with_uarch_context* context, uint32_t uarch_index, size_t linear_index) { + const struct fxdiv_divisor_size_t tile_range_kl = context->tile_range_kl; + const struct fxdiv_result_size_t tile_index_ij_kl = fxdiv_divide_size_t(linear_index, tile_range_kl); + const struct fxdiv_divisor_size_t range_j = context->range_j; + const struct fxdiv_result_size_t tile_index_i_j = fxdiv_divide_size_t(tile_index_ij_kl.quotient, range_j); + const struct fxdiv_divisor_size_t tile_range_l = context->tile_range_l; + const struct fxdiv_result_size_t tile_index_k_l = fxdiv_divide_size_t(tile_index_ij_kl.remainder, tile_range_l); + const size_t max_tile_k = 
context->tile_k; + const size_t max_tile_l = context->tile_l; + const size_t index_i = tile_index_i_j.quotient; + const size_t index_j = tile_index_i_j.remainder; + const size_t index_k = tile_index_k_l.quotient * max_tile_k; + const size_t index_l = tile_index_k_l.remainder * max_tile_l; + const size_t tile_k = min(max_tile_k, context->range_k - index_k); + const size_t tile_l = min(max_tile_l, context->range_l - index_l); + context->task(context->argument, uarch_index, index_i, index_j, index_k, index_l, tile_k, tile_l); +} + +void pthreadpool_parallelize_4d_tile_2d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_4d_tile_2d_with_id_t task, + void* argument, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t tile_k, + size_t tile_l, + uint32_t flags) +{ + if (threadpool == NULL || threadpool->threads_count <= 1 || ((range_i | range_j) <= 1 && range_k <= tile_k && range_l <= tile_l)) { + /* No thread pool used: execute task sequentially on the calling thread */ + + uint32_t uarch_index = default_uarch_index; + #if PTHREADPOOL_USE_CPUINFO + if (threadpool && threadpool->cpuinfo_is_initialized) { + uarch_index = cpuinfo_get_current_uarch_index(); + if (uarch_index > max_uarch_index) { + uarch_index = default_uarch_index; + } + } + #endif + + struct fpu_state saved_fpu_state = { 0 }; + if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) { + saved_fpu_state = get_fpu_state(); + disable_fpu_denormals(); + } + for (size_t i = 0; i < range_i; i++) { + for (size_t j = 0; j < range_j; j++) { + for (size_t k = 0; k < range_k; k += tile_k) { + for (size_t l = 0; l < range_l; l += tile_l) { + task(argument, uarch_index, i, j, k, l, + min(range_k - k, tile_k), min(range_l - l, tile_l)); + } + } + } + } + if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) { + set_fpu_state(saved_fpu_state); + } + } else { + /* Execute in parallel on the thread pool using linearized index */ + const 
size_t tile_range_k = divide_round_up(range_k, tile_k); + const size_t tile_range_l = divide_round_up(range_l, tile_l); + const struct pthreadpool_1d_with_uarch_params params = { + .default_uarch_index = default_uarch_index, + .max_uarch_index = max_uarch_index, + }; + struct compute_4d_tile_2d_with_uarch_context context = { + .task = task, + .argument = argument, + .tile_range_kl = fxdiv_init_size_t(tile_range_k * tile_range_l), + .range_j = fxdiv_init_size_t(range_j), + .tile_range_l = fxdiv_init_size_t(tile_range_l), + .range_k = range_k, + .range_l = range_l, + .tile_k = tile_k, + .tile_l = tile_l + }; + pthreadpool_parallelize( + threadpool, &thread_parallelize_1d_with_uarch, &params, sizeof(params), + (void*) compute_4d_tile_2d_with_uarch, &context, range_i * range_j * tile_range_k * tile_range_l, flags); + } +} + struct compute_5d_tile_2d_context { pthreadpool_task_5d_tile_2d_t task; void* argument; @@ -1132,7 +1531,7 @@ void pthreadpool_parallelize_5d_tile_2d( .tile_m = tile_m, }; pthreadpool_parallelize( - threadpool, &thread_parallelize_1d, + threadpool, &thread_parallelize_1d, NULL, 0, (void*) compute_5d_tile_2d, &context, range_i * range_j * range_k * tile_range_l * tile_range_m, flags); } } @@ -1232,7 +1631,7 @@ void pthreadpool_parallelize_6d_tile_2d( .tile_n = tile_n, }; pthreadpool_parallelize( - threadpool, &thread_parallelize_1d, + threadpool, &thread_parallelize_1d, NULL, 0, (void*) compute_6d_tile_2d, &context, range_i * range_j * range_k * range_l * tile_range_m * tile_range_n, flags); } } @@ -1289,6 +1688,11 @@ void pthreadpool_destroy(struct pthreadpool* threadpool) { pthread_cond_destroy(&threadpool->command_condvar); #endif } + #if PTHREADPOOL_USE_CPUINFO + if (threadpool->cpuinfo_is_initialized) { + cpuinfo_deinitialize(); + } + #endif #ifdef _WIN32 _aligned_free(threadpool); #else diff --git a/src/threadpool-shim.c b/src/threadpool-shim.c index c8ef51d..b5670ea 100644 --- a/src/threadpool-shim.c +++ b/src/threadpool-shim.c @@ -28,6 +28,20 @@ 
void pthreadpool_parallelize_1d( } } +void pthreadpool_parallelize_1d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_1d_with_id_t task, + void* argument, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range, + uint32_t flags) +{ + for (size_t i = 0; i < range; i++) { + task(argument, default_uarch_index, i); + } +} + void pthreadpool_parallelize_1d_tile_1d( pthreadpool_t threadpool, pthreadpool_task_1d_tile_1d_t task, @@ -89,6 +103,26 @@ void pthreadpool_parallelize_2d_tile_2d( } } +void pthreadpool_parallelize_2d_tile_2d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_2d_tile_2d_with_id_t task, + void* argument, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t tile_i, + size_t tile_j, + uint32_t flags) +{ + for (size_t i = 0; i < range_i; i += tile_i) { + for (size_t j = 0; j < range_j; j += tile_j) { + task(argument, default_uarch_index, i, j, + min(range_i - i, tile_i), min(range_j - j, tile_j)); + } + } +} + void pthreadpool_parallelize_3d_tile_2d( pthreadpool_t threadpool, pthreadpool_task_3d_tile_2d_t task, @@ -110,6 +144,29 @@ void pthreadpool_parallelize_3d_tile_2d( } } +void pthreadpool_parallelize_3d_tile_2d_with_uarch( + pthreadpool_t threadpool, + pthreadpool_task_3d_tile_2d_with_id_t task, + void* argument, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t range_k, + size_t tile_j, + size_t tile_k, + uint32_t flags) +{ + for (size_t i = 0; i < range_i; i++) { + for (size_t j = 0; j < range_j; j += tile_j) { + for (size_t k = 0; k < range_k; k += tile_k) { + task(argument, default_uarch_index, i, j, k, + min(range_j - j, tile_j), min(range_k - k, tile_k)); + } + } + } +} + void pthreadpool_parallelize_4d_tile_2d( pthreadpool_t threadpool, pthreadpool_task_4d_tile_2d_t task, @@ -134,6 +191,32 @@ void pthreadpool_parallelize_4d_tile_2d( } } +void pthreadpool_parallelize_4d_tile_2d_with_uarch( + 
pthreadpool_t threadpool, + pthreadpool_task_4d_tile_2d_with_id_t task, + void* argument, + uint32_t default_uarch_index, + uint32_t max_uarch_index, + size_t range_i, + size_t range_j, + size_t range_k, + size_t range_l, + size_t tile_k, + size_t tile_l, + uint32_t flags) +{ + for (size_t i = 0; i < range_i; i++) { + for (size_t j = 0; j < range_j; j++) { + for (size_t k = 0; k < range_k; k += tile_k) { + for (size_t l = 0; l < range_l; l += tile_l) { + task(argument, default_uarch_index, i, j, k, l, + min(range_k - k, tile_k), min(range_l - l, tile_l)); + } + } + } + } +} + void pthreadpool_parallelize_5d_tile_2d( pthreadpool_t threadpool, pthreadpool_task_5d_tile_2d_t task, diff --git a/test/pthreadpool.cc b/test/pthreadpool.cc index b80d98d..b8a6803 100644 --- a/test/pthreadpool.cc +++ b/test/pthreadpool.cc @@ -54,6 +54,9 @@ const size_t kIncrementIterations = 101; const size_t kIncrementIterations5D = 7; const size_t kIncrementIterations6D = 3; +const uint32_t kMaxUArchIndex = 0; +const uint32_t kDefaultUArchIndex = 42; + TEST(CreateAndDestroy, NullThreadPool) { pthreadpool* threadpool = nullptr; @@ -326,6 +329,321 @@ TEST(Parallelize1D, MultiThreadPoolWorkStealing) { EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize1DRange); } +static void ComputeNothing1DWithUArch(void*, uint32_t, size_t) { +} + +TEST(Parallelize1DWithUArch, SingleThreadPoolCompletes) { + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_1d_with_uarch(threadpool.get(), + ComputeNothing1DWithUArch, + nullptr, + kDefaultUArchIndex, + kMaxUArchIndex, + kParallelize1DRange, + 0 /* flags */); +} + +TEST(Parallelize1DWithUArch, MultiThreadPoolCompletes) { + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_1d_with_uarch( 
+ threadpool.get(), + ComputeNothing1DWithUArch, + nullptr, + kDefaultUArchIndex, + kMaxUArchIndex, + kParallelize1DRange, + 0 /* flags */); +} + +static void CheckUArch1DWithUArch(void*, uint32_t uarch_index, size_t) { + if (uarch_index != kDefaultUArchIndex) { + EXPECT_LE(uarch_index, kMaxUArchIndex); + } +} + +TEST(Parallelize1DWithUArch, SingleThreadPoolUArchInBounds) { + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_1d_with_uarch(threadpool.get(), + CheckUArch1DWithUArch, + nullptr, + kDefaultUArchIndex, + kMaxUArchIndex, + kParallelize1DRange, + 0 /* flags */); +} + +TEST(Parallelize1DWithUArch, MultiThreadPoolUArchInBounds) { + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_1d_with_uarch( + threadpool.get(), + CheckUArch1DWithUArch, + nullptr, + kDefaultUArchIndex, + kMaxUArchIndex, + kParallelize1DRange, + 0 /* flags */); +} + +static void CheckBounds1DWithUArch(void*, uint32_t, size_t i) { + EXPECT_LT(i, kParallelize1DRange); +} + +TEST(Parallelize1DWithUArch, SingleThreadPoolAllItemsInBounds) { + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_1d_with_uarch( + threadpool.get(), + CheckBounds1DWithUArch, + nullptr, + kDefaultUArchIndex, + kMaxUArchIndex, + kParallelize1DRange, + 0 /* flags */); +} + +TEST(Parallelize1DWithUArch, MultiThreadPoolAllItemsInBounds) { + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_1d_with_uarch( + threadpool.get(), + CheckBounds1DWithUArch, + nullptr, + kDefaultUArchIndex, + kMaxUArchIndex, + 
kParallelize1DRange, + 0 /* flags */); +} + +static void SetTrue1DWithUArch(std::atomic_bool* processed_indicators, uint32_t, size_t i) { + processed_indicators[i].store(true, std::memory_order_relaxed); +} + +TEST(Parallelize1DWithUArch, SingleThreadPoolAllItemsProcessed) { + std::vector<std::atomic_bool> indicators(kParallelize1DRange); + + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_1d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_1d_with_id_t>(SetTrue1DWithUArch), + static_cast<void*>(indicators.data()), + kDefaultUArchIndex, + kMaxUArchIndex, + kParallelize1DRange, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize1DRange; i++) { + EXPECT_TRUE(indicators[i].load(std::memory_order_relaxed)) + << "Element " << i << " not processed"; + } +} + +TEST(Parallelize1DWithUArch, MultiThreadPoolAllItemsProcessed) { + std::vector<std::atomic_bool> indicators(kParallelize1DRange); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_1d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_1d_with_id_t>(SetTrue1DWithUArch), + static_cast<void*>(indicators.data()), + kDefaultUArchIndex, + kMaxUArchIndex, + kParallelize1DRange, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize1DRange; i++) { + EXPECT_TRUE(indicators[i].load(std::memory_order_relaxed)) + << "Element " << i << " not processed"; + } +} + +static void Increment1DWithUArch(std::atomic_int* processed_counters, uint32_t, size_t i) { + processed_counters[i].fetch_add(1, std::memory_order_relaxed); +} + +TEST(Parallelize1DWithUArch, SingleThreadPoolEachItemProcessedOnce) { + std::vector<std::atomic_int> counters(kParallelize1DRange); + + auto_pthreadpool_t threadpool(pthreadpool_create(1), 
pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_1d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_1d_with_id_t>(Increment1DWithUArch), + static_cast<void*>(counters.data()), + kDefaultUArchIndex, + kMaxUArchIndex, + kParallelize1DRange, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize1DRange; i++) { + EXPECT_EQ(counters[i].load(std::memory_order_relaxed), 1) + << "Element " << i << " was processed " << counters[i].load(std::memory_order_relaxed) << " times (expected: 1)"; + } +} + +TEST(Parallelize1DWithUArch, MultiThreadPoolEachItemProcessedOnce) { + std::vector<std::atomic_int> counters(kParallelize1DRange); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_1d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_1d_with_id_t>(Increment1DWithUArch), + static_cast<void*>(counters.data()), + kDefaultUArchIndex, + kMaxUArchIndex, + kParallelize1DRange, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize1DRange; i++) { + EXPECT_EQ(counters[i].load(std::memory_order_relaxed), 1) + << "Element " << i << " was processed " << counters[i].load(std::memory_order_relaxed) << " times (expected: 1)"; + } +} + +TEST(Parallelize1DWithUArch, SingleThreadPoolEachItemProcessedMultipleTimes) { + std::vector<std::atomic_int> counters(kParallelize1DRange); + + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) { + pthreadpool_parallelize_1d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_1d_with_id_t>(Increment1DWithUArch), + static_cast<void*>(counters.data()), + kDefaultUArchIndex, + kMaxUArchIndex, + kParallelize1DRange, + 0 /* flags */); + } + + for (size_t i = 0; i < 
kParallelize1DRange; i++) { + EXPECT_EQ(counters[i].load(std::memory_order_relaxed), kIncrementIterations) + << "Element " << i << " was processed " << counters[i].load(std::memory_order_relaxed) << " times " + << "(expected: " << kIncrementIterations << ")"; + } +} + +TEST(Parallelize1DWithUArch, MultiThreadPoolEachItemProcessedMultipleTimes) { + std::vector<std::atomic_int> counters(kParallelize1DRange); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) { + pthreadpool_parallelize_1d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_1d_with_id_t>(Increment1DWithUArch), + static_cast<void*>(counters.data()), + kDefaultUArchIndex, + kMaxUArchIndex, + kParallelize1DRange, + 0 /* flags */); + } + + for (size_t i = 0; i < kParallelize1DRange; i++) { + EXPECT_EQ(counters[i].load(std::memory_order_relaxed), kIncrementIterations) + << "Element " << i << " was processed " << counters[i].load(std::memory_order_relaxed) << " times " + << "(expected: " << kIncrementIterations << ")"; + } +} + +static void IncrementSame1DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t i) { + num_processed_items->fetch_add(1, std::memory_order_relaxed); +} + +TEST(Parallelize1DWithUArch, MultiThreadPoolHighContention) { + std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_1d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_1d_with_id_t>(IncrementSame1DWithUArch), + static_cast<void*>(&num_processed_items), + kDefaultUArchIndex, + kMaxUArchIndex, + kParallelize1DRange, + 0 /* flags */); + 
EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize1DRange); +} + +static void WorkImbalance1DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t i) { + num_processed_items->fetch_add(1, std::memory_order_relaxed); + if (i == 0) { + /* Spin-wait until all items are computed */ + while (num_processed_items->load(std::memory_order_relaxed) != kParallelize1DRange) { + std::atomic_thread_fence(std::memory_order_acquire); + } + } +} + +TEST(Parallelize1DWithUArch, MultiThreadPoolWorkStealing) { + std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_1d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_1d_with_id_t>(WorkImbalance1DWithUArch), + static_cast<void*>(&num_processed_items), + kDefaultUArchIndex, + kMaxUArchIndex, + kParallelize1DRange, + 0 /* flags */); + EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize1DRange); +} + static void ComputeNothing1DTile1D(void*, size_t, size_t) { } @@ -1569,6 +1887,405 @@ TEST(Parallelize2DTile2D, MultiThreadPoolWorkStealing) { EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ); } +static void ComputeNothing2DTile2DWithUArch(void*, uint32_t, size_t, size_t, size_t, size_t) { +} + +TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolCompletes) { + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_2d_tile_2d_with_uarch(threadpool.get(), + ComputeNothing2DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ, + kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ, + 0 /* flags */); +} + 
+TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolCompletes) { + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_2d_tile_2d_with_uarch( + threadpool.get(), + ComputeNothing2DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ, + kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ, + 0 /* flags */); +} + +static void CheckUArch2DTile2DWithUArch(void*, uint32_t uarch_index, size_t, size_t, size_t, size_t) { + if (uarch_index != kDefaultUArchIndex) { + EXPECT_LE(uarch_index, kMaxUArchIndex); + } +} + +TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolUArchInBounds) { + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_2d_tile_2d_with_uarch( + threadpool.get(), + CheckUArch2DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ, + kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ, + 0 /* flags */); +} + +TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolUArchInBounds) { + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_2d_tile_2d_with_uarch( + threadpool.get(), + CheckUArch2DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ, + kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ, + 0 /* flags */); +} + +static void CheckBounds2DTile2DWithUArch(void*, uint32_t, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) { + EXPECT_LT(start_i, kParallelize2DTile2DRangeI); + EXPECT_LT(start_j, kParallelize2DTile2DRangeJ); + 
EXPECT_LE(start_i + tile_i, kParallelize2DTile2DRangeI); + EXPECT_LE(start_j + tile_j, kParallelize2DTile2DRangeJ); +} + +TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolAllItemsInBounds) { + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_2d_tile_2d_with_uarch( + threadpool.get(), + CheckBounds2DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ, + kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ, + 0 /* flags */); +} + +TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolAllItemsInBounds) { + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_2d_tile_2d_with_uarch( + threadpool.get(), + CheckBounds2DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ, + kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ, + 0 /* flags */); +} + +static void CheckTiling2DTile2DWithUArch(void*, uint32_t, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) { + EXPECT_GT(tile_i, 0); + EXPECT_LE(tile_i, kParallelize2DTile2DTileI); + EXPECT_EQ(start_i % kParallelize2DTile2DTileI, 0); + EXPECT_EQ(tile_i, std::min<size_t>(kParallelize2DTile2DTileI, kParallelize2DTile2DRangeI - start_i)); + + EXPECT_GT(tile_j, 0); + EXPECT_LE(tile_j, kParallelize2DTile2DTileJ); + EXPECT_EQ(start_j % kParallelize2DTile2DTileJ, 0); + EXPECT_EQ(tile_j, std::min<size_t>(kParallelize2DTile2DTileJ, kParallelize2DTile2DRangeJ - start_j)); +} + +TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolUniformTiling) { + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_2d_tile_2d_with_uarch( + threadpool.get(), + 
CheckTiling2DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ, + kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ, + 0 /* flags */); +} + +TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolUniformTiling) { + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_2d_tile_2d_with_uarch( + threadpool.get(), + CheckTiling2DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ, + kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ, + 0 /* flags */); +} + +static void SetTrue2DTile2DWithUArch(std::atomic_bool* processed_indicators, uint32_t, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) { + for (size_t i = start_i; i < start_i + tile_i; i++) { + for (size_t j = start_j; j < start_j + tile_j; j++) { + const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j; + processed_indicators[linear_idx].store(true, std::memory_order_relaxed); + } + } +} + +TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolAllItemsProcessed) { + std::vector<std::atomic_bool> indicators(kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ); + + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_2d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(SetTrue2DTile2DWithUArch), + static_cast<void*>(indicators.data()), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ, + kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize2DTile2DRangeI; i++) { + for (size_t j = 0; j < kParallelize2DTile2DRangeJ; j++) { + const size_t linear_idx = i * 
kParallelize2DTile2DRangeJ + j; + EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed)) + << "Element (" << i << ", " << j << ") not processed"; + } + } +} + +TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolAllItemsProcessed) { + std::vector<std::atomic_bool> indicators(kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_2d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(SetTrue2DTile2DWithUArch), + static_cast<void*>(indicators.data()), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ, + kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize2DTile2DRangeI; i++) { + for (size_t j = 0; j < kParallelize2DTile2DRangeJ; j++) { + const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j; + EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed)) + << "Element (" << i << ", " << j << ") not processed"; + } + } +} + +static void Increment2DTile2DWithUArch(std::atomic_int* processed_counters, uint32_t, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) { + for (size_t i = start_i; i < start_i + tile_i; i++) { + for (size_t j = start_j; j < start_j + tile_j; j++) { + const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j; + processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed); + } + } +} + +TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolEachItemProcessedOnce) { + std::vector<std::atomic_int> counters(kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ); + + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_2d_tile_2d_with_uarch( + 
threadpool.get(), + reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(Increment2DTile2DWithUArch), + static_cast<void*>(counters.data()), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ, + kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize2DTile2DRangeI; i++) { + for (size_t j = 0; j < kParallelize2DTile2DRangeJ; j++) { + const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j; + EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1) + << "Element (" << i << ", " << j << ") was processed " + << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)"; + } + } +} + +TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolEachItemProcessedOnce) { + std::vector<std::atomic_int> counters(kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_2d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(Increment2DTile2DWithUArch), + static_cast<void*>(counters.data()), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ, + kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize2DTile2DRangeI; i++) { + for (size_t j = 0; j < kParallelize2DTile2DRangeJ; j++) { + const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j; + EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1) + << "Element (" << i << ", " << j << ") was processed " + << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)"; + } + } +} + +TEST(Parallelize2DTile2DWithUArch, SingleThreadPoolEachItemProcessedMultipleTimes) { + std::vector<std::atomic_int> 
counters(kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ); + + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) { + pthreadpool_parallelize_2d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(Increment2DTile2DWithUArch), + static_cast<void*>(counters.data()), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ, + kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ, + 0 /* flags */); + } + + for (size_t i = 0; i < kParallelize2DTile2DRangeI; i++) { + for (size_t j = 0; j < kParallelize2DTile2DRangeJ; j++) { + const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j; + EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations) + << "Element (" << i << ", " << j << ") was processed " + << counters[linear_idx].load(std::memory_order_relaxed) << " times " + << "(expected: " << kIncrementIterations << ")"; + } + } +} + +TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolEachItemProcessedMultipleTimes) { + std::vector<std::atomic_int> counters(kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) { + pthreadpool_parallelize_2d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(Increment2DTile2DWithUArch), + static_cast<void*>(counters.data()), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ, + kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ, + 0 /* flags */); + } + + for (size_t i = 0; i < kParallelize2DTile2DRangeI; i++) { + for (size_t j 
= 0; j < kParallelize2DTile2DRangeJ; j++) { + const size_t linear_idx = i * kParallelize2DTile2DRangeJ + j; + EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations) + << "Element (" << i << ", " << j << ") was processed " + << counters[linear_idx].load(std::memory_order_relaxed) << " times " + << "(expected: " << kIncrementIterations << ")"; + } + } +} + +static void IncrementSame2DTile2DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) { + for (size_t i = start_i; i < start_i + tile_i; i++) { + for (size_t j = start_j; j < start_j + tile_j; j++) { + num_processed_items->fetch_add(1, std::memory_order_relaxed); + } + } +} + +TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolHighContention) { + std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_2d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(IncrementSame2DTile2DWithUArch), + static_cast<void*>(&num_processed_items), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ, + kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ, + 0 /* flags */); + EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ); +} + +static void WorkImbalance2DTile2DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t start_i, size_t start_j, size_t tile_i, size_t tile_j) { + num_processed_items->fetch_add(tile_i * tile_j, std::memory_order_relaxed); + if (start_i == 0 && start_j == 0) { + /* Spin-wait until all items are computed */ + while (num_processed_items->load(std::memory_order_relaxed) != kParallelize2DTile2DRangeI * 
kParallelize2DTile2DRangeJ) { + std::atomic_thread_fence(std::memory_order_acquire); + } + } +} + +TEST(Parallelize2DTile2DWithUArch, MultiThreadPoolWorkStealing) { + std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_2d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_2d_tile_2d_with_id_t>(WorkImbalance2DTile2DWithUArch), + static_cast<void*>(&num_processed_items), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize2DTile2DRangeI, kParallelize2DTile2DRangeJ, + kParallelize2DTile2DTileI, kParallelize2DTile2DTileJ, + 0 /* flags */); + EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize2DTile2DRangeI * kParallelize2DTile2DRangeJ); +} + static void ComputeNothing3DTile2D(void*, size_t, size_t, size_t, size_t, size_t) { } @@ -1929,6 +2646,418 @@ TEST(Parallelize3DTile2D, MultiThreadPoolWorkStealing) { EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK); } +static void ComputeNothing3DTile2DWithUArch(void*, uint32_t, size_t, size_t, size_t, size_t, size_t) { +} + +TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolCompletes) { + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_3d_tile_2d_with_uarch(threadpool.get(), + ComputeNothing3DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK, + kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK, + 0 /* flags */); +} + +TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolCompletes) { + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + 
ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_3d_tile_2d_with_uarch( + threadpool.get(), + ComputeNothing3DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK, + kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK, + 0 /* flags */); +} + +static void CheckUArch3DTile2DWithUArch(void*, uint32_t uarch_index, size_t, size_t, size_t, size_t, size_t) { + if (uarch_index != kDefaultUArchIndex) { + EXPECT_LE(uarch_index, kMaxUArchIndex); + } +} + +TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolUArchInBounds) { + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_3d_tile_2d_with_uarch( + threadpool.get(), + CheckUArch3DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK, + kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK, + 0 /* flags */); +} + +TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolUArchInBounds) { + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_3d_tile_2d_with_uarch( + threadpool.get(), + CheckUArch3DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK, + kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK, + 0 /* flags */); +} + +static void CheckBounds3DTile2DWithUArch(void*, uint32_t, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) { + EXPECT_LT(i, kParallelize3DTile2DRangeI); + EXPECT_LT(start_j, kParallelize3DTile2DRangeJ); + EXPECT_LT(start_k, kParallelize3DTile2DRangeK); + 
EXPECT_LE(start_j + tile_j, kParallelize3DTile2DRangeJ); + EXPECT_LE(start_k + tile_k, kParallelize3DTile2DRangeK); +} + +TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolAllItemsInBounds) { + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_3d_tile_2d_with_uarch( + threadpool.get(), + CheckBounds3DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK, + kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK, + 0 /* flags */); +} + +TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolAllItemsInBounds) { + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_3d_tile_2d_with_uarch( + threadpool.get(), + CheckBounds3DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK, + kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK, + 0 /* flags */); +} + +static void CheckTiling3DTile2DWithUArch(void*, uint32_t, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) { + EXPECT_GT(tile_j, 0); + EXPECT_LE(tile_j, kParallelize3DTile2DTileJ); + EXPECT_EQ(start_j % kParallelize3DTile2DTileJ, 0); + EXPECT_EQ(tile_j, std::min<size_t>(kParallelize3DTile2DTileJ, kParallelize3DTile2DRangeJ - start_j)); + + EXPECT_GT(tile_k, 0); + EXPECT_LE(tile_k, kParallelize3DTile2DTileK); + EXPECT_EQ(start_k % kParallelize3DTile2DTileK, 0); + EXPECT_EQ(tile_k, std::min<size_t>(kParallelize3DTile2DTileK, kParallelize3DTile2DRangeK - start_k)); +} + +TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolUniformTiling) { + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + 
pthreadpool_parallelize_3d_tile_2d_with_uarch( + threadpool.get(), + CheckTiling3DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK, + kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK, + 0 /* flags */); +} + +TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolUniformTiling) { + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_3d_tile_2d_with_uarch( + threadpool.get(), + CheckTiling3DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK, + kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK, + 0 /* flags */); +} + +static void SetTrue3DTile2DWithUArch(std::atomic_bool* processed_indicators, uint32_t, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) { + for (size_t j = start_j; j < start_j + tile_j; j++) { + for (size_t k = start_k; k < start_k + tile_k; k++) { + const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k; + processed_indicators[linear_idx].store(true, std::memory_order_relaxed); + } + } +} + +TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolAllItemsProcessed) { + std::vector<std::atomic_bool> indicators(kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK); + + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_3d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(SetTrue3DTile2DWithUArch), + static_cast<void*>(indicators.data()), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK, + 
kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize3DTile2DRangeI; i++) { + for (size_t j = 0; j < kParallelize3DTile2DRangeJ; j++) { + for (size_t k = 0; k < kParallelize3DTile2DRangeK; k++) { + const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k; + EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed)) + << "Element (" << i << ", " << j << ", " << k << ") not processed"; + } + } + } +} + +TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolAllItemsProcessed) { + std::vector<std::atomic_bool> indicators(kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_3d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(SetTrue3DTile2DWithUArch), + static_cast<void*>(indicators.data()), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK, + kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize3DTile2DRangeI; i++) { + for (size_t j = 0; j < kParallelize3DTile2DRangeJ; j++) { + for (size_t k = 0; k < kParallelize3DTile2DRangeK; k++) { + const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k; + EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed)) + << "Element (" << i << ", " << j << ", " << k << ") not processed"; + } + } + } +} + +static void Increment3DTile2DWithUArch(std::atomic_int* processed_counters, uint32_t, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) { + for (size_t j = start_j; j < start_j + tile_j; j++) { + for (size_t k = start_k; k < start_k + 
tile_k; k++) { + const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k; + processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed); + } + } +} + +TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolEachItemProcessedOnce) { + std::vector<std::atomic_int> counters(kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK); + + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_3d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(Increment3DTile2DWithUArch), + static_cast<void*>(counters.data()), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK, + kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize3DTile2DRangeI; i++) { + for (size_t j = 0; j < kParallelize3DTile2DRangeJ; j++) { + for (size_t k = 0; k < kParallelize3DTile2DRangeK; k++) { + const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k; + EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1) + << "Element (" << i << ", " << j << ", " << k << ") was processed " + << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)"; + } + } + } +} + +TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolEachItemProcessedOnce) { + std::vector<std::atomic_int> counters(kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_3d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(Increment3DTile2DWithUArch), + 
static_cast<void*>(counters.data()), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK, + kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize3DTile2DRangeI; i++) { + for (size_t j = 0; j < kParallelize3DTile2DRangeJ; j++) { + for (size_t k = 0; k < kParallelize3DTile2DRangeK; k++) { + const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k; + EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1) + << "Element (" << i << ", " << j << ", " << k << ") was processed " + << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)"; + } + } + } +} + +TEST(Parallelize3DTile2DWithUArch, SingleThreadPoolEachItemProcessedMultipleTimes) { + std::vector<std::atomic_int> counters(kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK); + + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) { + pthreadpool_parallelize_3d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(Increment3DTile2DWithUArch), + static_cast<void*>(counters.data()), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK, + kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK, + 0 /* flags */); + } + + for (size_t i = 0; i < kParallelize3DTile2DRangeI; i++) { + for (size_t j = 0; j < kParallelize3DTile2DRangeJ; j++) { + for (size_t k = 0; k < kParallelize3DTile2DRangeK; k++) { + const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k; + EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations) + << "Element (" << i << ", " << j << ", " << k << ") was processed " + 
<< counters[linear_idx].load(std::memory_order_relaxed) << " times " + << "(expected: " << kIncrementIterations << ")"; + } + } + } +} + +TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolEachItemProcessedMultipleTimes) { + std::vector<std::atomic_int> counters(kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) { + pthreadpool_parallelize_3d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(Increment3DTile2DWithUArch), + static_cast<void*>(counters.data()), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK, + kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK, + 0 /* flags */); + } + + for (size_t i = 0; i < kParallelize3DTile2DRangeI; i++) { + for (size_t j = 0; j < kParallelize3DTile2DRangeJ; j++) { + for (size_t k = 0; k < kParallelize3DTile2DRangeK; k++) { + const size_t linear_idx = (i * kParallelize3DTile2DRangeJ + j) * kParallelize3DTile2DRangeK + k; + EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations) + << "Element (" << i << ", " << j << ", " << k << ") was processed " + << counters[linear_idx].load(std::memory_order_relaxed) << " times " + << "(expected: " << kIncrementIterations << ")"; + } + } + } +} + +static void IncrementSame3DTile2DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) { + for (size_t j = start_j; j < start_j + tile_j; j++) { + for (size_t k = start_k; k < start_k + tile_k; k++) { + num_processed_items->fetch_add(1, std::memory_order_relaxed); + } + } +} + +TEST(Parallelize3DTile2DWithUArch, 
MultiThreadPoolHighContention) { + std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_3d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(IncrementSame3DTile2DWithUArch), + static_cast<void*>(&num_processed_items), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK, + kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK, + 0 /* flags */); + EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK); +} + +static void WorkImbalance3DTile2DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t i, size_t start_j, size_t start_k, size_t tile_j, size_t tile_k) { + num_processed_items->fetch_add(tile_j * tile_k, std::memory_order_relaxed); + if (i == 0 && start_j == 0 && start_k == 0) { + /* Spin-wait until all items are computed */ + while (num_processed_items->load(std::memory_order_relaxed) != kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK) { + std::atomic_thread_fence(std::memory_order_acquire); + } + } +} + +TEST(Parallelize3DTile2DWithUArch, MultiThreadPoolWorkStealing) { + std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_3d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_3d_tile_2d_with_id_t>(WorkImbalance3DTile2DWithUArch), + static_cast<void*>(&num_processed_items), + kDefaultUArchIndex, kMaxUArchIndex, + 
kParallelize3DTile2DRangeI, kParallelize3DTile2DRangeJ, kParallelize3DTile2DRangeK, + kParallelize3DTile2DTileJ, kParallelize3DTile2DTileK, + 0 /* flags */); + EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize3DTile2DRangeI * kParallelize3DTile2DRangeJ * kParallelize3DTile2DRangeK); +} + static void ComputeNothing4DTile2D(void*, size_t, size_t, size_t, size_t, size_t, size_t) { } @@ -2302,6 +3431,431 @@ TEST(Parallelize4DTile2D, MultiThreadPoolWorkStealing) { EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL); } +static void ComputeNothing4DTile2DWithUArch(void*, uint32_t, size_t, size_t, size_t, size_t, size_t, size_t) { +} + +TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolCompletes) { + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_4d_tile_2d_with_uarch(threadpool.get(), + ComputeNothing4DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL, + kParallelize4DTile2DTileK, kParallelize4DTile2DTileL, + 0 /* flags */); +} + +TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolCompletes) { + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_4d_tile_2d_with_uarch( + threadpool.get(), + ComputeNothing4DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL, + kParallelize4DTile2DTileK, kParallelize4DTile2DTileL, + 0 /* flags */); +} + +static void CheckUArch4DTile2DWithUArch(void*, uint32_t uarch_index, size_t, size_t, 
size_t, size_t, size_t, size_t) { + if (uarch_index != kDefaultUArchIndex) { + EXPECT_LE(uarch_index, kMaxUArchIndex); + } +} + +TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolUArchInBounds) { + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_4d_tile_2d_with_uarch( + threadpool.get(), + CheckUArch4DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL, + kParallelize4DTile2DTileK, kParallelize4DTile2DTileL, + 0 /* flags */); +} + +TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolUArchInBounds) { + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_4d_tile_2d_with_uarch( + threadpool.get(), + CheckUArch4DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL, + kParallelize4DTile2DTileK, kParallelize4DTile2DTileL, + 0 /* flags */); +} + +static void CheckBounds4DTile2DWithUArch(void*, uint32_t, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) { + EXPECT_LT(i, kParallelize4DTile2DRangeI); + EXPECT_LT(j, kParallelize4DTile2DRangeJ); + EXPECT_LT(start_k, kParallelize4DTile2DRangeK); + EXPECT_LT(start_l, kParallelize4DTile2DRangeL); + EXPECT_LE(start_k + tile_k, kParallelize4DTile2DRangeK); + EXPECT_LE(start_l + tile_l, kParallelize4DTile2DRangeL); +} + +TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolAllItemsInBounds) { + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_4d_tile_2d_with_uarch( + threadpool.get(), + CheckBounds4DTile2DWithUArch, + 
nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL, + kParallelize4DTile2DTileK, kParallelize4DTile2DTileL, + 0 /* flags */); +} + +TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolAllItemsInBounds) { + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_4d_tile_2d_with_uarch( + threadpool.get(), + CheckBounds4DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL, + kParallelize4DTile2DTileK, kParallelize4DTile2DTileL, + 0 /* flags */); +} + +static void CheckTiling4DTile2DWithUArch(void*, uint32_t, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) { + EXPECT_GT(tile_k, 0); + EXPECT_LE(tile_k, kParallelize4DTile2DTileK); + EXPECT_EQ(start_k % kParallelize4DTile2DTileK, 0); + EXPECT_EQ(tile_k, std::min<size_t>(kParallelize4DTile2DTileK, kParallelize4DTile2DRangeK - start_k)); + + EXPECT_GT(tile_l, 0); + EXPECT_LE(tile_l, kParallelize4DTile2DTileL); + EXPECT_EQ(start_l % kParallelize4DTile2DTileL, 0); + EXPECT_EQ(tile_l, std::min<size_t>(kParallelize4DTile2DTileL, kParallelize4DTile2DRangeL - start_l)); +} + +TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolUniformTiling) { + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_4d_tile_2d_with_uarch( + threadpool.get(), + CheckTiling4DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL, + kParallelize4DTile2DTileK, kParallelize4DTile2DTileL, + 0 /* flags */); +} + 
+TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolUniformTiling) { + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_4d_tile_2d_with_uarch( + threadpool.get(), + CheckTiling4DTile2DWithUArch, + nullptr, + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL, + kParallelize4DTile2DTileK, kParallelize4DTile2DTileL, + 0 /* flags */); +} + +static void SetTrue4DTile2DWithUArch(std::atomic_bool* processed_indicators, uint32_t, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) { + for (size_t k = start_k; k < start_k + tile_k; k++) { + for (size_t l = start_l; l < start_l + tile_l; l++) { + const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l; + processed_indicators[linear_idx].store(true, std::memory_order_relaxed); + } + } +} + +TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolAllItemsProcessed) { + std::vector<std::atomic_bool> indicators(kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL); + + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_4d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(SetTrue4DTile2DWithUArch), + static_cast<void*>(indicators.data()), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL, + kParallelize4DTile2DTileK, kParallelize4DTile2DTileL, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize4DTile2DRangeI; i++) { + for (size_t j = 0; j < kParallelize4DTile2DRangeJ; j++) 
{ + for (size_t k = 0; k < kParallelize4DTile2DRangeK; k++) { + for (size_t l = 0; l < kParallelize4DTile2DRangeL; l++) { + const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l; + EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed)) + << "Element (" << i << ", " << j << ", " << k << ", " << l << ") not processed"; + } + } + } + } +} + +TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolAllItemsProcessed) { + std::vector<std::atomic_bool> indicators(kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_4d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(SetTrue4DTile2DWithUArch), + static_cast<void*>(indicators.data()), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL, + kParallelize4DTile2DTileK, kParallelize4DTile2DTileL, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize4DTile2DRangeI; i++) { + for (size_t j = 0; j < kParallelize4DTile2DRangeJ; j++) { + for (size_t k = 0; k < kParallelize4DTile2DRangeK; k++) { + for (size_t l = 0; l < kParallelize4DTile2DRangeL; l++) { + const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l; + EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed)) + << "Element (" << i << ", " << j << ", " << k << ", " << l << ") not processed"; + } + } + } + } +} + +static void Increment4DTile2DWithUArch(std::atomic_int* processed_counters, uint32_t, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) 
{ + for (size_t k = start_k; k < start_k + tile_k; k++) { + for (size_t l = start_l; l < start_l + tile_l; l++) { + const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l; + processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed); + } + } +} + +TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolEachItemProcessedOnce) { + std::vector<std::atomic_int> counters(kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL); + + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + pthreadpool_parallelize_4d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(Increment4DTile2DWithUArch), + static_cast<void*>(counters.data()), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL, + kParallelize4DTile2DTileK, kParallelize4DTile2DTileL, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize4DTile2DRangeI; i++) { + for (size_t j = 0; j < kParallelize4DTile2DRangeJ; j++) { + for (size_t k = 0; k < kParallelize4DTile2DRangeK; k++) { + for (size_t l = 0; l < kParallelize4DTile2DRangeL; l++) { + const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l; + EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1) + << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed " + << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)"; + } + } + } + } +} + +TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolEachItemProcessedOnce) { + std::vector<std::atomic_int> counters(kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL); + + auto_pthreadpool_t 
threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_4d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(Increment4DTile2DWithUArch), + static_cast<void*>(counters.data()), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL, + kParallelize4DTile2DTileK, kParallelize4DTile2DTileL, + 0 /* flags */); + + for (size_t i = 0; i < kParallelize4DTile2DRangeI; i++) { + for (size_t j = 0; j < kParallelize4DTile2DRangeJ; j++) { + for (size_t k = 0; k < kParallelize4DTile2DRangeK; k++) { + for (size_t l = 0; l < kParallelize4DTile2DRangeL; l++) { + const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l; + EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1) + << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed " + << counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)"; + } + } + } + } +} + +TEST(Parallelize4DTile2DWithUArch, SingleThreadPoolEachItemProcessedMultipleTimes) { + std::vector<std::atomic_int> counters(kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL); + + auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) { + pthreadpool_parallelize_4d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(Increment4DTile2DWithUArch), + static_cast<void*>(counters.data()), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, 
kParallelize4DTile2DRangeL, + kParallelize4DTile2DTileK, kParallelize4DTile2DTileL, + 0 /* flags */); + } + + for (size_t i = 0; i < kParallelize4DTile2DRangeI; i++) { + for (size_t j = 0; j < kParallelize4DTile2DRangeJ; j++) { + for (size_t k = 0; k < kParallelize4DTile2DRangeK; k++) { + for (size_t l = 0; l < kParallelize4DTile2DRangeL; l++) { + const size_t linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l; + EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations) + << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed " + << counters[linear_idx].load(std::memory_order_relaxed) << " times " + << "(expected: " << kIncrementIterations << ")"; + } + } + } + } +} + +TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolEachItemProcessedMultipleTimes) { + std::vector<std::atomic_int> counters(kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) { + pthreadpool_parallelize_4d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(Increment4DTile2DWithUArch), + static_cast<void*>(counters.data()), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL, + kParallelize4DTile2DTileK, kParallelize4DTile2DTileL, + 0 /* flags */); + } + + for (size_t i = 0; i < kParallelize4DTile2DRangeI; i++) { + for (size_t j = 0; j < kParallelize4DTile2DRangeJ; j++) { + for (size_t k = 0; k < kParallelize4DTile2DRangeK; k++) { + for (size_t l = 0; l < kParallelize4DTile2DRangeL; l++) { + const size_t 
linear_idx = ((i * kParallelize4DTile2DRangeJ + j) * kParallelize4DTile2DRangeK + k) * kParallelize4DTile2DRangeL + l; + EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations) + << "Element (" << i << ", " << j << ", " << k << ", " << l << ") was processed " + << counters[linear_idx].load(std::memory_order_relaxed) << " times " + << "(expected: " << kIncrementIterations << ")"; + } + } + } + } +} + +static void IncrementSame4DTile2DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) { + for (size_t k = start_k; k < start_k + tile_k; k++) { + for (size_t l = start_l; l < start_l + tile_l; l++) { + num_processed_items->fetch_add(1, std::memory_order_relaxed); + } + } +} + +TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolHighContention) { + std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_4d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(IncrementSame4DTile2DWithUArch), + static_cast<void*>(&num_processed_items), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL, + kParallelize4DTile2DTileK, kParallelize4DTile2DTileL, + 0 /* flags */); + EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL); +} + +static void WorkImbalance4DTile2DWithUArch(std::atomic_int* num_processed_items, uint32_t, size_t i, size_t j, size_t start_k, size_t start_l, size_t tile_k, size_t tile_l) { + num_processed_items->fetch_add(tile_k * tile_l, std::memory_order_relaxed); + if 
(i == 0 && j == 0 && start_k == 0 && start_l == 0) { + /* Spin-wait until all items are computed */ + while (num_processed_items->load(std::memory_order_relaxed) != kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL) { + std::atomic_thread_fence(std::memory_order_acquire); + } + } +} + +TEST(Parallelize4DTile2DWithUArch, MultiThreadPoolWorkStealing) { + std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0); + + auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy); + ASSERT_TRUE(threadpool.get()); + + if (pthreadpool_get_threads_count(threadpool.get()) <= 1) { + GTEST_SKIP(); + } + + pthreadpool_parallelize_4d_tile_2d_with_uarch( + threadpool.get(), + reinterpret_cast<pthreadpool_task_4d_tile_2d_with_id_t>(WorkImbalance4DTile2DWithUArch), + static_cast<void*>(&num_processed_items), + kDefaultUArchIndex, kMaxUArchIndex, + kParallelize4DTile2DRangeI, kParallelize4DTile2DRangeJ, kParallelize4DTile2DRangeK, kParallelize4DTile2DRangeL, + kParallelize4DTile2DTileK, kParallelize4DTile2DTileL, + 0 /* flags */); + EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize4DTile2DRangeI * kParallelize4DTile2DRangeJ * kParallelize4DTile2DRangeK * kParallelize4DTile2DRangeL); +} + static void ComputeNothing5DTile2D(void*, size_t, size_t, size_t, size_t, size_t, size_t, size_t) { } |