htrdr_c.h (3665B)
/* Copyright (C) 2018-2019, 2022-2025 Centre National de la Recherche Scientifique
 * Copyright (C) 2020-2022 Institut Mines Télécom Albi-Carmaux
 * Copyright (C) 2022-2025 Institut Pierre-Simon Laplace
 * Copyright (C) 2022-2025 Institut de Physique du Globe de Paris
 * Copyright (C) 2018-2025 |Méso|Star> (contact@meso-star.com)
 * Copyright (C) 2022-2025 Observatoire de Paris
 * Copyright (C) 2022-2025 Université de Reims Champagne-Ardenne
 * Copyright (C) 2022-2025 Université de Versailles Saint-Quentin
 * Copyright (C) 2018-2019, 2022-2025 Université Paul Sabatier
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>. */

#ifndef HTRDR_C_H
#define HTRDR_C_H

#include <rsys/logger.h>
#include <rsys/ref_count.h>
#include <rsys/rsys.h>

/* Wrap an MPI call; in debug builds its return code is checked against
 * MPI_SUCCESS */
#ifndef NDEBUG
#define MPI(Func) ASSERT(MPI_##Func == MPI_SUCCESS)
#else
#define MPI(Func) MPI_##Func
#endif

/* Number of consecutively generated random numbers assigned to a thread */
#define RNG_SEQUENCE_SIZE 10000

/* Tags of the MPI messages exchanged between the htrdr processes */
enum htrdr_mpi_message {
  HTRDR_MPI_PROGRESS_RENDERING,
  HTRDR_MPI_STEAL_REQUEST,
  HTRDR_MPI_WORK_STEALING,
  HTRDR_MPI_TILE_DATA,
  HTRDR_MPI_CHUNK_DATA
};

struct s3d_device;

struct htrdr {
  struct s3d_device* s3d;

  unsigned nthreads; /* #threads of the process */

  int mpi_rank; /* Rank of the process in the MPI group */
  int mpi_nprocs; /* Overall #processes in the MPI group */
  char* mpi_err_str; /* Temp buffer used to store MPI error strings */
  int8_t* mpi_working_procs; /* Flags telling which ranks are still active */
  size_t mpi_nworking_procs; /* #active processes */

  /* Process progress percentage */
  int32_t* mpi_progress_octree;
  int32_t* mpi_progress_render;

  struct mutex* mpi_mutex; /* Protect MPI calls from concurrent threads */

  int verbose;

  struct logger logger;
  struct mem_allocator* allocator;
  struct mem_allocator* lifo_allocators; /* Per thread LIFO allocators */

  ref_T ref;
};

extern LOCAL_SYM void
setup_logger
  (struct htrdr* htrdr);

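/*
 * Usage sketch of the MPI() wrapper defined above. It assumes that the
 * translation unit includes <mpi.h>, since the macro relies on the MPI_
 * prefixed symbols being visible; the variable names are illustrative only:
 *
 *   int rank = 0;
 *   MPI(Comm_rank(MPI_COMM_WORLD, &rank));
 *
 * Without NDEBUG this expands to
 *   ASSERT(MPI_Comm_rank(MPI_COMM_WORLD, &rank) == MPI_SUCCESS);
 * while with NDEBUG defined the call is simply issued as
 *   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
 */
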
/* Helpers handling the progress, in percent, of the MPI processes for the
 * given message type */
extern LOCAL_SYM void
send_mpi_progress
  (struct htrdr* htrdr,
   const enum htrdr_mpi_message progress,
   const int32_t percent);

extern LOCAL_SYM void
fetch_mpi_progress
  (struct htrdr* htrdr,
   const enum htrdr_mpi_message progress);

extern LOCAL_SYM void
print_mpi_progress
  (struct htrdr* htrdr,
   const enum htrdr_mpi_message progress);

extern LOCAL_SYM void
clear_mpi_progress
  (struct htrdr* htrdr,
   const enum htrdr_mpi_message progress);

extern int32_t
total_mpi_progress
  (const struct htrdr* htrdr,
   const enum htrdr_mpi_message progress);

static INLINE void
update_mpi_progress(struct htrdr* htrdr, const enum htrdr_mpi_message progress)
{
  ASSERT(htrdr);
  fetch_mpi_progress(htrdr, progress);
  clear_mpi_progress(htrdr, progress);
  print_mpi_progress(htrdr, progress);
}

/* Comparison function compatible with qsort; orders doubles ascendingly */
static FINLINE int
cmp_dbl(const void* a, const void* b)
{
  const double d0 = *((const double*)a);
  const double d1 = *((const double*)b);
  return d0 < d1 ? -1 : (d0 > d1 ? 1 : 0);
}

#endif /* HTRDR_C_H */
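/*
 * Usage sketches for the helpers declared above. The call sequences are
 * illustrative only; variable names and the surrounding control flow are
 * assumptions, not part of this header.
 *
 * Sorting a buffer of doubles with cmp_dbl (qsort comes from <stdlib.h>):
 *
 *   double values[4] = {3.0, 1.0, 2.0, 0.5};
 *   qsort(values, sizeof(values)/sizeof(*values), sizeof(*values), cmp_dbl);
 *
 * Reporting the rendering progress of the calling process, then fetching,
 * clearing and printing the pending progress messages as performed by
 * update_mpi_progress:
 *
 *   send_mpi_progress(htrdr, HTRDR_MPI_PROGRESS_RENDERING, percent);
 *   update_mpi_progress(htrdr, HTRDR_MPI_PROGRESS_RENDERING);
 */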