svx_buffer.c (10621B)
/* Copyright (C) 2018, 2020-2025 |Méso|Star> (contact@meso-star.com)
 * Copyright (C) 2018 Université Paul Sabatier
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>. */

#include "svx_buffer.h"

#include <rsys/math.h>
#include <rsys/mem_allocator.h>

#ifdef COMPILER_CL
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <unistd.h>
#endif

/*******************************************************************************
 * Helper functions
 ******************************************************************************/
static INLINE res_T
ensure_allocated_nodes(struct buffer* buf, const size_t nnodes)
{
  char* node_page = NULL;
  size_t nnode_pages = 0;
  res_T res = RES_OK;
  ASSERT(buf);

  /* The requested nodes fit in the current node page: nothing to do */
  if(buf->node_head.ipage != BUFFER_INDEX_NULL.ipage
  && buf->node_head.inode + nnodes <= buf->pagesize/sizeof(struct buffer_xnode))
    goto exit;

  nnode_pages = darray_page_size_get(&buf->node_pages);
  if(nnode_pages > UINT32_MAX) { res = RES_MEM_ERR; goto error; }
  ASSERT(nnode_pages == buf->node_head.ipage + 1);

  /* Alloc and register a node page containing the nodes and the far indices */
  node_page = MEM_CALLOC(buf->allocator, 1, buf->pagesize);
  if(!node_page) { res = RES_MEM_ERR; goto error; }
  res = darray_page_push_back(&buf->node_pages, &node_page);
  if(res != RES_OK) goto error;

  buf->node_head.inode = 0;
  buf->node_head.ipage = (uint32_t)nnode_pages;

exit:
  return res;
error:
  if(node_page) MEM_RM(buf->allocator, node_page);
  CHK(darray_page_resize(&buf->node_pages, nnode_pages) == RES_OK);
  goto exit;
}

static INLINE res_T
ensure_allocated_attrs(struct buffer* buf, const size_t nattrs)
{
  char* attr_page = NULL;
  size_t nattr_pages = 0;
  res_T res = RES_OK;
  ASSERT(buf);

  /* The requested attributes fit in the current attr page: nothing to do */
  if(buf->attr_head.ipage != BUFFER_INDEX_NULL.ipage
  && buf->attr_head.inode + nattrs <= buf->pagesize/buf->voxsize)
    goto exit;

  nattr_pages = darray_page_size_get(&buf->attr_pages);
  if(nattr_pages > UINT32_MAX) { res = RES_MEM_ERR; goto error; }
  ASSERT(nattr_pages == buf->attr_head.ipage + 1);

  /* Alloc and register an attr page */
  attr_page = MEM_CALLOC(buf->allocator, 1, buf->pagesize);
  if(!attr_page) { res = RES_MEM_ERR; goto error; }
  res = darray_page_push_back(&buf->attr_pages, &attr_page);
  if(res != RES_OK) goto error;

  buf->attr_head.inode = 0;
  buf->attr_head.ipage = (uint32_t)nattr_pages;

exit:
  return res;
error:
  if(attr_page) MEM_RM(buf->allocator, attr_page);
  CHK(darray_page_resize(&buf->attr_pages, nattr_pages) == RES_OK);
  goto exit;
}
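
/* Note on the paging scheme: nodes and attributes are stored in pages of
 * `pagesize` bytes each, and a `struct buffer_index` addresses a slot by page
 * (`ipage`) and by position within that page (`inode`). The `node_head` and
 * `attr_head` members point to the next free slot; the helpers above append a
 * zeroed page and reset `inode` whenever the requested run of slots does not
 * fit in the current page. A hypothetical accessor could resolve such an
 * index roughly as sketched below; the actual buffer_get_node/buffer_get_attr
 * accessors are declared in svx_buffer.h and may differ:
 *
 *   static struct buffer_xnode*
 *   resolve_node(struct buffer* buf, const struct buffer_index id)
 *   {
 *     char* page = darray_page_data_get(&buf->node_pages)[id.ipage];
 *     return (struct buffer_xnode*)page + id.inode;
 *   }
 */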

/*******************************************************************************
 * Local functions
 ******************************************************************************/
void
buffer_init
  (struct mem_allocator* allocator,
   const size_t voxel_size,
   struct buffer* buf)
{
  ASSERT(buf && allocator);
  memset(buf, 0, sizeof(struct buffer));
#ifdef COMPILER_CL
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  buf->pagesize = si.dwPageSize;
#else
  buf->pagesize = (size_t)sysconf(_SC_PAGESIZE);
#endif
  buf->voxsize = voxel_size;
  darray_page_init(allocator, &buf->node_pages);
  darray_page_init(allocator, &buf->attr_pages);
  buf->node_head = BUFFER_INDEX_NULL;
  buf->attr_head = BUFFER_INDEX_NULL;
  buf->allocator = allocator;
  CHK(buf->voxsize <= buf->pagesize);
}

void
buffer_release(struct buffer* buf)
{
  ASSERT(buf);
  buffer_clear(buf);
  darray_page_release(&buf->node_pages);
  darray_page_release(&buf->attr_pages);
}

res_T
buffer_alloc_nodes
  (struct buffer* buf,
   const size_t nnodes,
   struct buffer_index* first_node)
{
  res_T res = RES_OK;
  ASSERT(buf && first_node);

  if(nnodes > buf->pagesize / sizeof(struct buffer_xnode))
    return RES_MEM_ERR;

  res = ensure_allocated_nodes(buf, nnodes);
  if(res != RES_OK) return res;

  *first_node = buf->node_head;
  buf->node_head.inode = (uint16_t)(buf->node_head.inode + nnodes);
  return RES_OK;
}

res_T
buffer_alloc_attrs
  (struct buffer* buf,
   const size_t nattrs,
   struct buffer_index* first_attr)
{
  res_T res = RES_OK;
  ASSERT(buf && first_attr);

  if(nattrs > buf->pagesize / buf->voxsize) return RES_MEM_ERR;

  res = ensure_allocated_attrs(buf, nattrs);
  if(res != RES_OK) return res;

  *first_attr = buf->attr_head;
  buf->attr_head.inode = (uint16_t)(buf->attr_head.inode + nattrs);
  return RES_OK;
}

/* Reserve, in the current node page, enough consecutive node slots to store a
 * full buffer_index, i.e. a "far" index. Fail if the remaining space of the
 * page cannot hold it */
res_T
buffer_alloc_far_index
  (struct buffer* buf,
   struct buffer_index* id)
{
  size_t remaining_size;
  size_t skipped_nnodes;
  STATIC_ASSERT(sizeof(struct buffer_index) >= sizeof(struct buffer_xnode),
    Unexpected_type_size);

  remaining_size =
    buf->pagesize - buf->node_head.inode*sizeof(struct buffer_xnode);

  /* Not enough memory in the current page */
  if(sizeof(struct buffer_index) > remaining_size) return RES_MEM_ERR;

  *id = buf->node_head;
  skipped_nnodes = sizeof(struct buffer_index) / sizeof(struct buffer_xnode);
  buf->node_head.inode = (uint16_t)(buf->node_head.inode + skipped_nnodes);
  return RES_OK;
}

void
buffer_clear(struct buffer* buf)
{
  size_t i;
  ASSERT(buf);
  FOR_EACH(i, 0, darray_page_size_get(&buf->node_pages)) {
    MEM_RM(buf->allocator, darray_page_data_get(&buf->node_pages)[i]);
  }
  FOR_EACH(i, 0, darray_page_size_get(&buf->attr_pages)) {
    MEM_RM(buf->allocator, darray_page_data_get(&buf->attr_pages)[i]);
  }
  darray_page_purge(&buf->node_pages);
  darray_page_purge(&buf->attr_pages);
  buf->node_head = BUFFER_INDEX_NULL;
  buf->attr_head = BUFFER_INDEX_NULL;
}

res_T
buffer_write(const struct buffer* buf, FILE* stream)
{
  size_t ipage = 0;
  size_t npages = 0;
  res_T res = RES_OK;
  ASSERT(buf && stream);

  #define WRITE(Var, N) {                                                     \
    if(fwrite((Var), sizeof(*(Var)), (N), stream) != (N)) {                   \
      res = RES_IO_ERR;                                                       \
      goto error;                                                             \
    }                                                                         \
  } (void)0
  WRITE(&BUFFER_VERSION, 1);
  WRITE(&buf->pagesize, 1);
  WRITE(&buf->voxsize, 1);
  WRITE(&buf->node_head, 1);
  WRITE(&buf->attr_head, 1);

  npages = darray_page_size_get(&buf->node_pages);
  WRITE(&npages, 1);
  FOR_EACH(ipage, 0, npages) {
    WRITE(darray_page_cdata_get(&buf->node_pages)[ipage], buf->pagesize);
  }

  npages = darray_page_size_get(&buf->attr_pages);
  WRITE(&npages, 1);
  FOR_EACH(ipage, 0, npages) {
    WRITE(darray_page_cdata_get(&buf->attr_pages)[ipage], buf->pagesize);
  }
  #undef WRITE

exit:
  return res;
error:
  goto exit;
}
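
/* The stream produced by buffer_write above, and consumed by buffer_read
 * below, is a raw dump of the in-memory state, written in host byte order and
 * with host type sizes, in the following order:
 *
 *   BUFFER_VERSION
 *   pagesize, voxsize
 *   node_head, attr_head
 *   node page count, followed by that many pages of `pagesize` bytes
 *   attr page count, followed by that many pages of `pagesize` bytes
 */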

res_T
buffer_read(struct buffer* buf, FILE* stream)
{
  int version = 0;
  char* page = NULL;
  size_t ipage = 0;
  size_t npages = 0;
  res_T res = RES_OK;
  ASSERT(buf && stream);

  buffer_clear(buf);

  #define READ(Var, N) {                                                      \
    if(fread((Var), sizeof(*(Var)), (N), stream) != (N)) {                    \
      if(feof(stream)) {                                                      \
        res = RES_BAD_ARG;                                                    \
      } else if(ferror(stream)) {                                             \
        res = RES_IO_ERR;                                                     \
      } else {                                                                \
        res = RES_UNKNOWN_ERR;                                                \
      }                                                                       \
      goto error;                                                             \
    }                                                                         \
  } (void)0

  /* Currently, only one version of the buffer data structure can be
   * serialized. Version management is thus as simple as rejecting any buffer
   * data structure whose version is not the current one. */
  READ(&version, 1);
  if(version != BUFFER_VERSION) {
    res = RES_BAD_ARG;
    goto error;
  }

  READ(&buf->pagesize, 1);
  READ(&buf->voxsize, 1);
  READ(&buf->node_head, 1);
  READ(&buf->attr_head, 1);

  READ(&npages, 1);
  res = darray_page_reserve(&buf->node_pages, npages);
  if(res != RES_OK) goto error;

  /* Read the pages of nodes */
  FOR_EACH(ipage, 0, npages) {
    page = MEM_ALLOC(buf->allocator, buf->pagesize);
    if(!page) { res = RES_MEM_ERR; goto error; }

    READ(page, buf->pagesize);
    CHK(darray_page_push_back(&buf->node_pages, &page) == RES_OK);
    page = NULL;
  }

  READ(&npages, 1);
  res = darray_page_reserve(&buf->attr_pages, npages);
  if(res != RES_OK) goto error;

  /* Read the pages of attribs */
  FOR_EACH(ipage, 0, npages) {
    page = MEM_ALLOC(buf->allocator, buf->pagesize);
    if(!page) { res = RES_MEM_ERR; goto error; }

    READ(page, buf->pagesize);
    CHK(darray_page_push_back(&buf->attr_pages, &page) == RES_OK);
    page = NULL;
  }
  #undef READ

exit:
  return res;
error:
  if(page) MEM_RM(buf->allocator, page);
  buffer_clear(buf);
  goto exit;
}

/* Recursively check the tree rooted at `root`: every valid child flagged as a
 * leaf must reference a readable attribute. On success, `nleaves` is
 * incremented by the number of leaves encountered */
res_T
buffer_check_tree
  (struct buffer* buf,
   const struct buffer_index root,
   const size_t tree_dimension,
   size_t* nleaves)
{
  const struct buffer_xnode* node;
  const int nchildren = BIT((int)tree_dimension);
  int ichild;
  res_T res = RES_OK;
  ASSERT(buf);
  ASSERT(0 < tree_dimension && tree_dimension <= 3);

  node = buffer_get_node(buf, root);
  FOR_EACH(ichild, 0, nchildren) {
    const int ichild_flag = BIT(ichild);
    if((node->is_valid & ichild_flag) == 0) continue;

    if(node->is_leaf & ichild_flag) {
      struct buffer_index iattr;
      iattr = buffer_get_child_attr_index(buf, root, ichild);
      if(buffer_get_attr(buf, iattr) == NULL)
        return RES_BAD_ARG;
      *nleaves += 1;
    } else {
      struct buffer_index child;
      child = buffer_get_child_node_index(buf, root, ichild);
      res = buffer_check_tree(buf, child, tree_dimension, nleaves);
      if(res != RES_OK) return res;
    }
  }
  return RES_OK;
}
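
/* Minimal usage sketch, not part of the original file: a possible
 * allocate-and-serialize pass through the buffer lifecycle, assuming rsys'
 * default allocator (&mem_default_allocator); the voxel size (8 bytes) and
 * the file name are arbitrary example values.
 *
 *   struct buffer buf;
 *   struct buffer_index inode;
 *   FILE* stream = fopen("buffer.bin", "wb");
 *   CHK(stream != NULL);
 *   buffer_init(&mem_default_allocator, 8, &buf);
 *   CHK(buffer_alloc_nodes(&buf, 1, &inode) == RES_OK);
 *   CHK(buffer_write(&buf, stream) == RES_OK);
 *   fclose(stream);
 *   buffer_release(&buf);
 */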