rnatm

Load and structure data describing an atmosphere
git clone git://git.meso-star.fr/rnatm.git

rnatm_voxel_partition.c (17384B)


/* Copyright (C) 2022, 2023, 2025 Centre National de la Recherche Scientifique
 * Copyright (C) 2022, 2023, 2025 Institut Pierre-Simon Laplace
 * Copyright (C) 2022, 2023, 2025 Institut de Physique du Globe de Paris
 * Copyright (C) 2022, 2023, 2025 |Méso|Star> (contact@meso-star.com)
 * Copyright (C) 2022, 2023, 2025 Observatoire de Paris
 * Copyright (C) 2022, 2023, 2025 Université de Reims Champagne-Ardenne
 * Copyright (C) 2022, 2023, 2025 Université de Versailles Saint-Quentin
 * Copyright (C) 2022, 2023, 2025 Université Paul Sabatier
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>. */

#include "rnatm_c.h"
#include "rnatm_log.h"
#include "rnatm_voxel_partition.h"

#include <rsys/condition.h>
#include <rsys/mem_allocator.h>
#include <rsys/mutex.h>
#include <rsys/ref_count.h>

/* A tile stores N*M voxels, where N is the number of voxels in a partition
 * and M is the voxel width defined when the pool is created. This width makes
 * it possible to store in the same tile the radiative coefficients of a voxel
 * for several spectral data: for each voxel, the tile actually stores M voxel
 * items, one after the other */
struct tile {
  struct list_node node;
  float voxels[1]; /* Flexible array member */
};

struct partition {
  struct list_node node;
  size_t id; /* Unique identifier of the partition */
  struct tile* tile; /* Set of voxels */
  struct pool* pool;

  /* Number of references on the partition. Its initial value is set when the
   * partition is committed. Once fetched, a reference is given to the caller,
   * which then releases it by calling partition_free */
  ATOMIC ref;
};

struct pool {
  /* Allocated partitions and tiles */
  struct partition* parts;
  char* tiles;
  size_t tile_sz; /* Size in bytes of a tile */

  struct list_node parts_free; /* List of free partitions */
  /* List of committed partitions, sorted in ascending order wrt partition id */
  struct list_node parts_commit;

  struct list_node tiles_free; /* List of available tiles of voxels */

  /* Dummy voxel returned by a partition when no voxel is reserved for it */
  float empty_voxel[NFLOATS_PER_VOXEL];

  struct mutex* mutex;
  struct cond* cond_new;
  struct cond* cond_fetch;
  struct cond* cond_tile;

  size_t next_part_id; /* Identifier of the next partition */
  size_t partition_definition; /* #voxels along each of the 3 axes */
  size_t partition_nvoxels; /* Overall number of voxels in a partition */
  size_t voxel_width; /* Number of data items per voxel */

  struct mem_allocator* allocator;
  ATOMIC error; /* Has the pool been invalidated? */
  ref_T ref;
};
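
/* Overview of the intended flow (an illustrative sketch inferred from the
 * functions below, not authoritative documentation): a producer obtains a
 * partition with pool_next_partition(), fills its voxels through
 * partition_get_voxel() and publishes it with partition_commit(); consumers
 * block in pool_fetch_partition() until the requested id is committed, read
 * the voxels, then drop their reference with partition_free(). On error,
 * pool_invalidate() wakes up every waiting thread. */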

/*******************************************************************************
 * Helper functions
 ******************************************************************************/
static INLINE res_T
check_pool_create_args(const struct pool_create_args* args)
{
  if(!args
  || !args->npartitions
  || !args->voxel_width
  || args->npartitions < args->npreallocated_partitions
  || !IS_POW2(args->partition_definition)) {
    return RES_BAD_ARG;
  }

  return RES_OK;
}
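
/* For example (arbitrary illustrative values): npartitions = 8,
 * npreallocated_partitions = 4, voxel_width = 2 and partition_definition = 32
 * pass this check, since the partition count is non-zero and not smaller than
 * the preallocated count, the voxel width is non-zero, and the definition is
 * a power of two. */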

static INLINE void
tile_init(struct tile* tile)
{
  ASSERT(tile);
  list_init(&tile->node);
}

static INLINE void
tile_release(struct tile* tile)
{
  ASSERT(tile && is_list_empty(&tile->node));
  (void)tile;
}

static void
partition_init
  (struct pool* pool,
   struct partition* partition)
{
  ASSERT(pool && partition);
  list_init(&partition->node);
  partition->id = SIZE_MAX;
  partition->tile = NULL;
  partition->pool = pool;
  partition->ref = 0;
}

static void
partition_release(struct partition* partition)
{
  ASSERT(partition && is_list_empty(&partition->node));
  if(partition->tile) {
    ASSERT(is_list_empty(&partition->tile->node));
    list_add(&partition->pool->tiles_free, &partition->tile->node);
  }
}

static struct partition*
get_free_partition(struct pool* pool)
{
  struct list_node* node = NULL;
  struct partition* partition = NULL;
  ASSERT(pool);

  mutex_lock(pool->mutex);

  /* Waits for a free partition */
  while(is_list_empty(&pool->parts_free) && !pool->error) {
    cond_wait(pool->cond_new, pool->mutex);
  }

  if(pool->error) {
    /* An error occurred */
    partition = NULL;
    mutex_unlock(pool->mutex);

  } else {
    /* Retrieve the next available partition */
    node = list_head(&pool->parts_free);
    list_del(node);
    mutex_unlock(pool->mutex);

    partition = CONTAINER_OF(node, struct partition, node);
    partition->id = SIZE_MAX;
  }
  return partition;
}

static res_T
reserve_partition_voxels(struct partition* partition)
{
  struct pool* pool = NULL;
  res_T res = RES_OK;
  ASSERT(partition && !partition->tile);

  pool = partition->pool;

  mutex_lock(partition->pool->mutex);

  /* Waits for a free tile */
  while(is_list_empty(&pool->tiles_free) && !pool->error) {
    cond_wait(pool->cond_tile, pool->mutex);
  }

  if(pool->error) {
    /* An error occurred */
    mutex_unlock(pool->mutex);
    res = RES_UNKNOWN_ERR;

  } else {
    struct list_node* node = NULL;

    /* Get an available voxel tile */
    node = list_head(&pool->tiles_free);
    list_del(node);
    mutex_unlock(pool->mutex);

    partition->tile = CONTAINER_OF(node, struct tile, node);
  }
  return res;
}

static void
release_pool(ref_T* ref)
{
  struct pool* pool = CONTAINER_OF(ref, struct pool, ref);
  struct list_node* node = NULL;
  struct list_node* tmp_node = NULL;
  ASSERT(ref);

  if(pool->mutex) mutex_destroy(pool->mutex);
  if(pool->cond_new) cond_destroy(pool->cond_new);
  if(pool->cond_fetch) cond_destroy(pool->cond_fetch);
  if(pool->cond_tile) cond_destroy(pool->cond_tile);

  LIST_FOR_EACH_SAFE(node, tmp_node, &pool->parts_free) {
    struct partition* partition = CONTAINER_OF(node, struct partition, node);
    list_del(node);
    partition_release(partition);
  }

  LIST_FOR_EACH_SAFE(node, tmp_node, &pool->parts_commit) {
    struct partition* partition = CONTAINER_OF(node, struct partition, node);
    list_del(node);
    partition_release(partition);
  }

  LIST_FOR_EACH_SAFE(node, tmp_node, &pool->tiles_free) {
    struct tile* tile = CONTAINER_OF(node, struct tile, node);
    list_del(node);
    tile_release(tile);
  }

  ASSERT(is_list_empty(&pool->parts_free));
  ASSERT(is_list_empty(&pool->parts_commit));
  ASSERT(is_list_empty(&pool->tiles_free));

  MEM_RM(pool->allocator, pool->parts);
  MEM_RM(pool->allocator, pool->tiles);
  MEM_RM(pool->allocator, pool);
}

/*******************************************************************************
 * Partition of voxels
 ******************************************************************************/
void
partition_free(struct partition* partition)
{
  struct pool* pool = NULL;
  int free_tile = 0;
  ASSERT(partition);

  pool = partition->pool;

  /* The partition reference counter can be zero before being decremented: it
   * is initialized (to at most the voxel width) only once the partition has
   * been committed. So, if the partition is released before any commit (for
   * example, when it does not overlap the atmosphere grid), the counter can
   * be negative after the decrement below */
  if(ATOMIC_DECR(&partition->ref) > 0)
    return; /* The partition is still referenced */

  mutex_lock(pool->mutex);
  list_move_tail(&partition->node, &pool->parts_free); /* Free the partition */
  partition->ref = 0; /* Reset to 0 rather than leaving a negative value */
  if(partition->tile) { /* Free the reserved tile */
    list_move_tail(&partition->tile->node, &pool->tiles_free);
    partition->tile = NULL;
    free_tile = 1;
  }
  mutex_unlock(pool->mutex);

  /* Notify a thread waiting for a free partition that we just registered one */
  cond_signal(pool->cond_new);

  if(free_tile) {
    /* Notify a partition waiting for a free tile that we just registered one */
    cond_signal(pool->cond_tile);
  }
}
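
/* Reference flow, restated for clarity (inferred from partition_commit and
 * partition_free): partition_commit(p, n) arms the counter with n references,
 * one per expected consumer; each consumer then calls partition_free(p), and
 * the call that brings the counter to zero recycles the partition and its
 * tile, signalling the matching waiters. */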

void
partition_empty(struct partition* partition)
{
  ASSERT(partition && partition->tile);

  mutex_lock(partition->pool->mutex);
  list_move_tail(&partition->tile->node, &partition->pool->tiles_free);
  partition->tile = NULL;
  mutex_unlock(partition->pool->mutex);

  /* Notify a partition waiting for a free tile that we just registered one */
  cond_signal(partition->pool->cond_tile);
}

void
partition_commit(struct partition* partition, const size_t refs_count)
{
  struct list_node* node = NULL;
  struct pool* pool = NULL;
  ASSERT(partition);
  ASSERT(refs_count && refs_count <= partition->pool->voxel_width);

  pool = partition->pool;

  /* Setup the number of partition references */
  partition->ref = (ATOMIC)refs_count;

  /* Committed partitions are sorted in ascending order of their id. Walking
   * the list from its tail, we therefore look for the last partition whose id
   * is less than the id of the partition to be committed, and insert the
   * latter right after it */
  mutex_lock(pool->mutex);
  LIST_FOR_EACH_REVERSE(node, &pool->parts_commit) {
    struct partition* partition2 = CONTAINER_OF(node, struct partition, node);
    if(partition2->id < partition->id) break;
  }
  list_add(node, &partition->node);
  mutex_unlock(pool->mutex);

  /* Notify the threads waiting for a valid partition that we just registered
   * one. Note that a partition can register several items when voxel_width > 1,
   * and therefore several threads can wait for the same partition.
   * Consequently, we broadcast the signal to all the threads that are blocked
   * on the fetch condition */
  cond_broadcast(pool->cond_fetch);
}

size_t
partition_get_id(const struct partition* partition)
{
  ASSERT(partition);
  return partition->id;
}

size_t
partition_get_definition(const struct partition* partition)
{
  ASSERT(partition);
  return partition->pool->partition_definition;
}

float*
partition_get_voxel
  (struct partition* part,
   const size_t ivoxel,
   const size_t iitem)
{
  ASSERT(part && ivoxel < part->pool->partition_nvoxels && part->tile != NULL);
  ASSERT(iitem < part->pool->voxel_width);
  return part->tile->voxels
    + NFLOATS_PER_VOXEL*(ivoxel*part->pool->voxel_width + iitem);
}
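
/* Layout note, derived from the indexing above: a tile is a flat float array
 * in which item iitem of voxel ivoxel starts at float offset
 * NFLOATS_PER_VOXEL*(ivoxel*voxel_width + iitem). With voxel_width = 2, for
 * instance, the two items of voxel 0 occupy the first 2*NFLOATS_PER_VOXEL
 * floats of the tile, followed by the two items of voxel 1, and so on. */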

const float*
partition_cget_voxel
  (struct partition* part,
   const size_t ivoxel,
   const size_t iitem)
{
  ASSERT(part && ivoxel < part->pool->partition_nvoxels);
  ASSERT(iitem < part->pool->voxel_width);
  if(part->tile == NULL) {
    return part->pool->empty_voxel;
  } else {
    return partition_get_voxel(part, ivoxel, iitem);
  }
}

void
partition_clear_voxels(struct partition* partition)
{
  size_t ivoxel = 0;
  ASSERT(partition);

  if(partition->tile == NULL) return; /* Nothing to do */

  FOR_EACH(ivoxel, 0, partition->pool->partition_nvoxels) {
    size_t iitem = 0;
    FOR_EACH(iitem, 0, partition->pool->voxel_width) {
      float* voxel = partition_get_voxel(partition, ivoxel, iitem);
      voxel_clear(voxel);
    }
  }
}

void
partition_accum(struct partition* dst, struct partition* src)
{
  size_t ivoxel = 0;
  ASSERT(dst->pool->partition_nvoxels == src->pool->partition_nvoxels);
  ASSERT(dst->pool->voxel_width == src->pool->voxel_width);
  ASSERT(src->tile && dst->tile); /* Partitions cannot be empty */

  FOR_EACH(ivoxel, 0, src->pool->partition_nvoxels) {
    size_t iitem = 0;
    FOR_EACH(iitem, 0, src->pool->voxel_width) {
      const float* src_voxel = partition_cget_voxel(src, ivoxel, iitem);
      float* dst_voxel = partition_get_voxel(dst, ivoxel, iitem);
      voxel_accum(dst_voxel, src_voxel);
    }
  }
}

/*******************************************************************************
 * Pool of partitions
 ******************************************************************************/
res_T
pool_create
  (const struct pool_create_args* args,
   struct pool** out_pool)
{
  struct mem_allocator* allocator = NULL;
  struct pool* pool = NULL;
  size_t ipartition = 0;
  size_t nvoxels = 0;
  size_t tile_sz = 0;

  res_T res = RES_OK;
  ASSERT(check_pool_create_args(args) == RES_OK && out_pool);

  allocator = args->allocator ? args->allocator : &mem_default_allocator;
  pool = MEM_CALLOC(allocator, 1, sizeof(*pool));
  if(!pool) {
    res = RES_MEM_ERR;
    goto error;
  }
  pool->allocator = allocator;
  pool->partition_definition = args->partition_definition;
  pool->voxel_width = args->voxel_width;
  pool->partition_nvoxels =
    pool->partition_definition
  * pool->partition_definition
  * pool->partition_definition;
  ref_init(&pool->ref);
  list_init(&pool->parts_free);
  list_init(&pool->parts_commit);
  list_init(&pool->tiles_free);
  voxel_clear(pool->empty_voxel);

  /* Create the mutex and condition variables used to synchronize threads */
  pool->mutex = mutex_create();
  if(!pool->mutex) { res = RES_UNKNOWN_ERR; goto error; }
  pool->cond_new = cond_create();
  if(!pool->cond_new) { res = RES_UNKNOWN_ERR; goto error; }
  pool->cond_fetch = cond_create();
  if(!pool->cond_fetch) { res = RES_UNKNOWN_ERR; goto error; }
  pool->cond_tile = cond_create();
  if(!pool->cond_tile) { res = RES_UNKNOWN_ERR; goto error; }

  /* Allocate the partitions */
  pool->parts = MEM_CALLOC(allocator, args->npartitions, sizeof(*pool->parts));
  if(!pool->parts) { res = RES_MEM_ERR; goto error; }

  /* Allocate the tiles */
  nvoxels = pool->partition_nvoxels;
  tile_sz =
    sizeof(struct tile)
  - sizeof(float)/*Dummy member*/
  + sizeof(float[NFLOATS_PER_VOXEL]) * nvoxels * pool->voxel_width;
  tile_sz = ALIGN_SIZE(tile_sz, ALIGNOF(struct tile));
  pool->tiles = MEM_CALLOC(allocator, args->npreallocated_partitions, tile_sz);
  if(!pool->tiles) { res = RES_MEM_ERR; goto error; }

  /* Setup the free partitions */
  FOR_EACH(ipartition, 0, args->npartitions) {
    struct partition* partition = pool->parts + ipartition;
    partition_init(pool, partition);
    list_add(&pool->parts_free, &partition->node);
  }

  /* Setup the free tiles */
  FOR_EACH(ipartition, 0, args->npreallocated_partitions) {
    struct tile* tile = (struct tile*)(pool->tiles + ipartition*tile_sz);
    tile_init(tile);
    list_add(&pool->tiles_free, &tile->node);
  }

exit:
  *out_pool = pool;
  return res;
error:
  if(pool) { pool_ref_put(pool); pool = NULL; }
  goto exit;
}
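
/* Usage sketch with arbitrary illustrative values (not part of the public
 * documentation); the fields below are exactly those read by pool_create and
 * check_pool_create_args:
 *
 *   struct pool_create_args args = {0};
 *   struct pool* pool = NULL;
 *   args.npartitions = 8;
 *   args.npreallocated_partitions = 4; (must not exceed npartitions)
 *   args.partition_definition = 32;    (must be a power of two)
 *   args.voxel_width = 2;
 *   args.allocator = NULL;             (NULL selects mem_default_allocator)
 *   if(pool_create(&args, &pool) == RES_OK) {
 *     ...
 *     pool_ref_put(pool);
 *   }
 */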

void
pool_ref_get(struct pool* pool)
{
  ASSERT(pool);
  ref_get(&pool->ref);
}

void
pool_ref_put(struct pool* pool)
{
  ASSERT(pool);
  ref_put(&pool->ref, release_pool);
}

size_t
pool_get_partition_definition(const struct pool* pool)
{
  ASSERT(pool);
  return pool->partition_definition;
}

size_t
pool_get_voxel_width(const struct pool* pool)
{
  ASSERT(pool);
  return pool->voxel_width;
}

struct partition*
pool_next_partition(struct pool* pool)
{
  struct partition* partition;
  ASSERT(pool);

  partition = pool_dummy_partition(pool);
  if(!partition) return NULL;

  mutex_lock(pool->mutex);
  partition->id = pool->next_part_id;
  pool->next_part_id += 1;
  mutex_unlock(pool->mutex);

  return partition;
}

struct partition*
pool_dummy_partition(struct pool* pool)
{
  struct partition* partition;
  res_T res = RES_OK;
  ASSERT(pool);

  partition = get_free_partition(pool);
  if(!partition) return NULL; /* The pool was invalidated while waiting */
  res = reserve_partition_voxels(partition);
  if(res != RES_OK) {
    partition_free(partition);
    return NULL;
  }
  return partition;
}

struct partition*
pool_fetch_partition(struct pool* pool, const size_t ipartition)
{
  struct partition* found_partition = NULL;
  struct list_node* node = NULL;
  ASSERT(pool);

  mutex_lock(pool->mutex);
  while(!pool->error) {

    /* Search for the partition that matches the submitted id */
    LIST_FOR_EACH(node, &pool->parts_commit) {
      struct partition* partition = CONTAINER_OF(node, struct partition, node);

      /* The partition to fetch has been found */
      if(partition->id == ipartition) {
        found_partition = partition;
        break;
      }

      /* Partitions are sorted in ascending order. Stop the linear search if the
       * current id is greater than the submitted one */
      if(partition->id > ipartition) break;
    }

    if(!found_partition) {
      /* The partition is not committed yet. The thread waits for it */
      cond_wait(pool->cond_fetch, pool->mutex);

    } else {
      /* Do not remove the found partition from the list of committed
       * partitions since the same partition can be fetched several times when
       * the width of the voxels is greater than 1 */
      /* list_del(&found_partition->node); */
      break;
    }
  }
  mutex_unlock(pool->mutex);

  return found_partition;
}

void
pool_invalidate(struct pool* pool)
{
  ASSERT(pool);

  /* Notifies that an error has occurred */
  mutex_lock(pool->mutex);
  pool->error = 1;
  mutex_unlock(pool->mutex);

  /* Wakes up all the waiting threads */
  cond_broadcast(pool->cond_new);
  cond_broadcast(pool->cond_fetch);
  cond_broadcast(pool->cond_tile);
}

void
pool_reset(struct pool* pool)
{
  ASSERT(pool);
  mutex_lock(pool->mutex);
  pool->next_part_id = 0;
  mutex_unlock(pool->mutex);
}