stardis-solver

Solve coupled heat transfers
git clone git://git.meso-star.fr/stardis-solver.git

commit 8877e4ab8351f7df07d0f32fcf0d8118e0de1ce1
parent f8ac59923c13813a8d043fad679390e6a9d07548
Author: Vincent Forest <vincent.forest@meso-star.com>
Date:   Thu, 25 Nov 2021 11:45:55 +0100

Add conditional support of MPI

The library can now be optionally compiled with MPI support. In this
case, on device creation, we check that MPI is initialized and that it
provides the required thread support.

Note that MPI is not yet used to distribute computations between
processes. Only its initial support is committed here.
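
As an illustration, here is a minimal caller-side sketch of the expected
initialization order. The device creation call itself is elided; refer to
sdis.h for the actual entry points:

  #include <mpi.h>

  int
  main(int argc, char** argv)
  {
    int provided = 0;

    /* MPI must be initialized by the caller, with at least the
     * MPI_THREAD_SERIALIZED level of thread support, before the Stardis
     * device is created. */
    MPI_Init_thread(&argc, &argv, MPI_THREAD_SERIALIZED, &provided);
    if(provided < MPI_THREAD_SERIALIZED) {
      MPI_Finalize();
      return 1;
    }

    /* ... create the Stardis device with sdis_device_create: on creation it
     * checks MPI_Initialized and MPI_Query_thread, and fails with RES_BAD_OP
     * if these requirements are not met ... */

    /* ... run the solves and release the device ... */

    MPI_Finalize();
    return 0;
  }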

Diffstat:
M cmake/CMakeLists.txt |  16 ++++++++++++++--
M src/sdis_device.c    | 167 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 181 insertions(+), 2 deletions(-)

diff --git a/cmake/CMakeLists.txt b/cmake/CMakeLists.txt
@@ -21,6 +21,9 @@ include(CMakeDependentOption)
 set(SDIS_SOURCE_DIR ${PROJECT_SOURCE_DIR}/../src)
 
 option(NO_TEST "Do not build tests" OFF)
+option(USE_MPI
+  "Enable the support of distributed parallelism \
+using the Message Passing Interface specification." ON)
 CMAKE_DEPENDENT_OPTION(ALL_TESTS "Perform basic and advanced tests" OFF
   "NOT NO_TEST" OFF)
 
@@ -49,6 +52,12 @@ include_directories(
   ${StarEnc3D_INCLUDE_DIR}
   ${RSys_INCLUDE_DIR})
 
+if(USE_MPI)
+  find_package(MPI 2 REQUIRED)
+  set(CMAKE_C_COMPILER ${MPI_C_COMPILER})
+  include_directories(${MPI_INCLUDE_PATH})
+endif()
+
 rcmake_append_runtime_dirs(_runtime_dirs
   RSys Star2D Star3D StarSP StarEnc2D StarEnc3D)
 
@@ -152,6 +161,10 @@ if(CMAKE_COMPILER_IS_GNUCC)
   set_target_properties(sdis PROPERTIES LINK_FLAGS "${OpenMP_C_FLAGS}")
 endif()
 
+if(USE_MPI)
+  set_target_properties(sdis PROPERTIES COMPILE_DEFINITIONS "SDIS_USE_MPI")
+endif()
+
 rcmake_setup_devel(sdis Stardis ${VERSION} sdis_version.h)
 
 ###############################################################################
@@ -228,7 +241,7 @@ if(NOT NO_TEST)
   target_link_libraries(test_sdis_solve_probe3 Star3DUT)
   target_link_libraries(test_sdis_solve_probe3_2d ${MATH_LIB})
   target_link_libraries(test_sdis_solve_camera Star3DUT)
-
+
   rcmake_copy_runtime_libraries(test_sdis_solid_random_walk_robustness)
 endif()
 
@@ -242,4 +255,3 @@ install(TARGETS sdis
   RUNTIME DESTINATION bin)
 install(FILES ${SDIS_FILES_INC_API} DESTINATION include/)
 install(FILES ${SDIS_FILES_DOC} DESTINATION share/doc/stardis-solver)
-
diff --git a/src/sdis_device.c b/src/sdis_device.c
@@ -27,9 +27,166 @@
 
 #include <omp.h>
 
+#ifdef SDIS_USE_MPI
+  #include <mpi.h>
+#endif
+
 /*******************************************************************************
  * Helper functions
  ******************************************************************************/
+#ifdef SDIS_USE_MPI
+
+static const char*
+mpi_error_string(struct sdis_device* dev, const int mpi_err)
+{
+  int res_mpi = MPI_SUCCESS;
+  int len;
+  ASSERT(dev);
+
+  res_mpi = MPI_Error_string(mpi_err, str_get(&dev->mpi_err_str), &len);
+  return res_mpi == MPI_SUCCESS
+    ? str_get(&dev->mpi_err_str) : "Invalid MPI error";
+}
+
+static const char*
+mpi_thread_support_string(const int val)
+{
+  switch(val) {
+    case MPI_THREAD_SINGLE: return "MPI_THREAD_SINGLE";
+    case MPI_THREAD_FUNNELED: return "MPI_THREAD_FUNNELED";
+    case MPI_THREAD_SERIALIZED: return "MPI_THREAD_SERIALIZED";
+    case MPI_THREAD_MULTIPLE: return "MPI_THREAD_MULTIPLE";
+    default: FATAL("Unreachable code.\n"); break;
+  }
+}
+
+static res_T
+mpi_print_proc_info(struct sdis_device* dev)
+{
+  char proc_name[MPI_MAX_PROCESSOR_NAME];
+  int proc_name_len;
+  char* proc_names = NULL;
+  uint32_t* proc_nthreads = NULL;
+  uint32_t nthreads = 0;
+  int iproc;
+  res_T res = RES_OK;
+  ASSERT(dev);
+
+  /* On process 0, allocate the arrays to stored gathered data */
+  if(dev->mpi_rank == 0) {
+
+    /* Allocate the array to store the per process name */
+    proc_names = MEM_CALLOC(dev->allocator, (size_t)dev->mpi_nprocs,
+      MPI_MAX_PROCESSOR_NAME*sizeof(*proc_names));
+    if(!proc_names) {
+      res = RES_MEM_ERR;
+      log_err(dev,
+        "Could not allocate the temporary memory for MPI process names -- "
+        "%s.\n", res_to_cstr(res));
+      goto error;
+    }
+
+    /* Allocate the array to store the per process #threads */
+    proc_nthreads = MEM_CALLOC(dev->allocator, (size_t)dev->mpi_nprocs,
+      sizeof(*proc_nthreads));
+    if(!proc_nthreads) {
+      res = RES_MEM_ERR;
+      log_err(dev,
+        "Could not allocate the temporary memory for the #threads of the MPI "
+        "processes -- %s.\n", res_to_cstr(res));
+      goto error;
+    }
+  }
+
+  /* Gather the process name to the process 0 */
+  MPI(Get_processor_name(proc_name, &proc_name_len));
+  MPI(Gather(proc_name, MPI_MAX_PROCESSOR_NAME, MPI_CHAR, proc_names,
+    MPI_MAX_PROCESSOR_NAME, MPI_CHAR, 0, MPI_COMM_WORLD));
+
+  /* Gather the #threads to process 0*/
+  nthreads = (uint32_t)dev->nthreads;
+  MPI(Gather(&nthreads, 1, MPI_UINT32_T, proc_nthreads, 1, MPI_UINT32_T, 0,
+    MPI_COMM_WORLD));
+
+  if(dev->mpi_rank == 0) {
+    FOR_EACH(iproc, 0, dev->mpi_nprocs) {
+      log_info(dev, "Process %d -- %s; #threads: %u\n",
+        iproc, proc_names + iproc*MPI_MAX_PROCESSOR_NAME, proc_nthreads[iproc]);
+    }
+  }
+
+exit:
+  if(proc_names) MEM_RM(dev->allocator, proc_names);
+  if(proc_nthreads) MEM_RM(dev->allocator, proc_nthreads);
+  return res;
+error:
+  goto exit;
+}
+
+static res_T
+mpi_init(struct sdis_device* dev)
+{
+  int res_mpi = MPI_SUCCESS;
+  int is_init = 0;
+  int thread_support = 0;
+  res_T res = RES_OK;
+  ASSERT(dev);
+
+  #define CALL_MPI(Func, ErrMsg) {                                            \
+    res_mpi = MPI_##Func;                                                     \
+    if(res_mpi != MPI_SUCCESS) {                                              \
+      log_err(dev, ErrMsg" - %s\n", mpi_error_string(dev, res_mpi));          \
+      res = RES_UNKNOWN_ERR;                                                  \
+      goto error;                                                             \
+    }                                                                         \
+  } (void)0
+
+  CALL_MPI(Initialized(&is_init),
+    "Error querying the MPI init state");
+
+  if(!is_init) {
+    log_err(dev,
+      "MPI is not initialized. The MPI_Init[_thread] function must be called "
+      "priorly to the creation of the Stardis device.\n");
+    res = RES_BAD_OP;
+    goto error;
+  }
+
+  CALL_MPI(Query_thread(&thread_support),
+    "Error querying the MPI thread support");
+
+  if(thread_support < MPI_THREAD_SERIALIZED) {
+    log_err(dev,
+      "The provided MPI implementation does not support serialized API calls "
+      "from multiple threads. The thread support is limited to %s.\n",
+      mpi_thread_support_string(thread_support));
+    res = RES_BAD_OP;
+    goto error;
+  }
+
+  CALL_MPI(Comm_rank(MPI_COMM_WORLD, &dev->mpi_rank),
+    "Error retrieving the MPI rank");
+  CALL_MPI(Comm_size(MPI_COMM_WORLD, &dev->mpi_nprocs),
+    "Error retrieving the size of the MPI group");
+
+  #undef CALL_MPI
+
+  mpi_print_proc_info(dev);
+
+exit:
+  return res;
error:
+  goto exit;
+}
+
+#endif /* SDIS_USE_MPI */
+
+static INLINE int
+check_sdis_device_create_args(const struct sdis_device_create_args* args)
+{
+  return args && args->nthreads_hint != 0;
+}
+
 static void
 device_release(ref_T* ref)
 {
@@ -43,6 +200,9 @@ device_release(ref_T* ref)
   ASSERT(flist_name_is_empty(&dev->media_names));
   flist_name_release(&dev->interfaces_names);
   flist_name_release(&dev->media_names);
+#ifdef SDIS_USE_MPI
+  str_release(&dev->mpi_err_str);
+#endif
   MEM_RM(dev->allocator, dev);
 }
 
@@ -88,6 +248,9 @@ sdis_device_create
   ref_init(&dev->ref);
   flist_name_init(allocator, &dev->interfaces_names);
   flist_name_init(allocator, &dev->media_names);
+#ifdef SDIS_USE_MPI
+  str_init(allocator, &dev->mpi_err_str);
+#endif
 
   if(logger) {
     dev->logger = logger;
@@ -102,6 +265,7 @@ sdis_device_create
     log_err(dev,
       "%s: could not create the Star-2D device on Stardis -- %s.\n",
       FUNC_NAME, res_to_cstr(res));
+    goto error;
   }
 
   res = s3d_device_create(log, allocator, 0, &dev->s3d_dev);
@@ -112,6 +276,9 @@ sdis_device_create
     goto error;
   }
 
+  res = mpi_init(dev);
+  if(res != RES_OK) goto error;
+
 exit:
   if(out_dev) *out_dev = dev;
   return res;