senc2d_scene_analyze.c (55404B)
/* Copyright (C) 2018-2021, 2023, 2024 |Méso|Star> (contact@meso-star.com)
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>. */

#include "senc2d.h"
#include "senc2d_device_c.h"
#include "senc2d_scene_c.h"
#include "senc2d_enclosure_data.h"
#include "senc2d_scene_analyze_c.h"
#include "senc2d_internal_types.h"

#include <rsys/rsys.h>
#include <rsys/float2.h>
#include <rsys/double22.h>
#include <rsys/mem_allocator.h>
#include <rsys/hash_table.h>
#include <rsys/dynamic_array.h>
#include <rsys/dynamic_array_uchar.h>
#include <rsys/clock_time.h>

#include <star/s2d.h>

#include <omp.h>
#include <limits.h>
#include <stdlib.h>

#define CC_DESCRIPTOR_NULL__ {\
  0, 0, INT_MAX, VRTX_NULL__, 0,\
  CC_ID_NONE, CC_GROUP_ROOT_NONE, ENCLOSURE_NULL__,\
  { SEG_NULL__, 0},\
  NULL\
}
#ifdef COMPILER_GCC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmissing-field-initializers"
#endif
const struct cc_descriptor CC_DESCRIPTOR_NULL = CC_DESCRIPTOR_NULL__;
#ifdef COMPILER_GCC
#pragma GCC diagnostic pop
#endif

#define DARRAY_NAME component_id
#define DARRAY_DATA component_id_t
#include <rsys/dynamic_array.h>

#define HTABLE_NAME overlap
#define HTABLE_KEY seg_id_t
#define HTABLE_DATA char
#include <rsys/hash_table.h>

/******************************************************************************
 * Helper functions
 *****************************************************************************/
static INLINE int
neighbour_cmp(const void* w1, const void* w2)
{
  const double a1 = ((struct neighbour_info*)w1)->angle;
  const double a2 = ((struct neighbour_info*)w2)->angle;
  return (a1 > a2) - (a1 < a2);
}

static side_id_t
get_side_not_in_connex_component
  (const side_id_t last_side,
   const struct segside* segsides,
   const uchar* processed,
   side_id_t* first_side_not_in_component,
   const medium_id_t medium)
{
  ASSERT(segsides && processed && first_side_not_in_component);
  {
    side_id_t i = *first_side_not_in_component;
    while(i <= last_side
      && (segsides[i].medium != medium
        || (processed[SEGSIDE_2_SEG(i)] & SEGSIDE_2_SIDEFLAG(i))))
      ++i;

    *first_side_not_in_component = i + 1;
    if(i > last_side) return SIDE_NULL__;
    return i;
  }
}

/* Here unsigned is required by the s2d API */
static void
get_scn_indices(const unsigned iseg, unsigned ids[2], void* ctx) {
  int i;
  const struct senc2d_scene* scene = ctx;
  const struct segment_in* seg =
    darray_segment_in_cdata_get(&scene->segments_in) + iseg;
  FOR_EACH(i, 0, 2) {
    ASSERT(seg->vertice_id[i] < scene->nverts);
    ASSERT(seg->vertice_id[i] <= UINT_MAX);
    ids[i] = (unsigned)seg->vertice_id[i]; /* Back to s2d API type */
  }
}
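/* A sketch of how star-2d consumes get_scn_indices (above) and
 * get_scn_position (below) when the scene view is created; the pull order is
 * an assumption for illustration, not a description of the actual s2d
 * internals:
 *
 *   unsigned ids[2];
 *   float pos[2];
 *   get_scn_indices(iseg, ids, (void*)scn);    - vertex ids of segment iseg
 *   get_scn_position(ids[0], pos, (void*)scn); - position of its 1st vertex
 */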
/* Here unsigned is required by the s2d API */
static void
get_scn_position(const unsigned ivert, float pos[2], void* ctx) {
  const struct senc2d_scene* scene = ctx;
  const union double2* pt =
    darray_position_cdata_get(&scene->vertices) + ivert;
  f2_set_d2(pos, pt->vec);
}

static int
self_hit_filter
  (const struct s2d_hit* hit,
   const float ray_org[2],
   const float ray_dir[2],
   const float ray_range[2],
   void* ray_data,
   void* filter_data)
{
  const struct darray_segment_comp* segments_comp = filter_data;
  const component_id_t* origin_component = ray_data;
  const struct segment_comp* hit_seg_comp;

  (void)ray_org; (void)ray_dir; (void)ray_range;
  ASSERT(hit && segments_comp && origin_component);
  ASSERT(hit->prim.prim_id < darray_segment_comp_size_get(segments_comp));
  hit_seg_comp = darray_segment_comp_cdata_get(segments_comp)
    + hit->prim.prim_id;
  return (hit_seg_comp->component[SENC2D_FRONT] == *origin_component
    || hit_seg_comp->component[SENC2D_BACK] == *origin_component);
}

static void
extract_connex_components
  (struct senc2d_scene* scn,
   struct segside* segsides,
   struct darray_ptr_component_descriptor* connex_components,
   const struct darray_segment_tmp* segments_tmp_array,
   struct darray_segment_comp* segments_comp_array,
   struct s2d_scene_view** s2d_view,
   ATOMIC* component_count,
   /* Shared error status.
    * We accept overwriting an error with a different error */
   res_T* p_res)
{
  /* This function is called from an omp parallel block and executed
   * concurrently. */
  struct mem_allocator* alloc;
  int64_t m_idx; /* OpenMP requires a signed type for the for loop variable */
  struct darray_side_id stack;
  const union double2* positions;
  const struct segment_tmp* segments_tmp;
  struct segment_comp* segments_comp;
  /* An array to flag sides when processed */
  uchar* processed;
  /* An array to store the component being processed */
  struct darray_side_id current_component;
  /* A bool array to store the media of the component being processed */
  uchar* current_media = NULL;
  size_t sz, ii;

  ASSERT(scn && segsides && connex_components && segments_tmp_array
    && segments_comp_array && s2d_view && component_count && p_res);
  alloc = scn->dev->allocator;
  positions = darray_position_cdata_get(&scn->vertices);
  segments_tmp = darray_segment_tmp_cdata_get(segments_tmp_array);
  segments_comp = darray_segment_comp_data_get(segments_comp_array);
  darray_side_id_init(alloc, &stack);
  darray_side_id_init(alloc, &current_component);
  processed = MEM_CALLOC(alloc, scn->nsegs, sizeof(uchar));
  if(!processed) {
    *p_res = RES_MEM_ERR;
    return;
  }

#ifndef NDEBUG
  #pragma omp single
  {
    seg_id_t s_;
    int s;
    ASSERT(darray_ptr_component_descriptor_size_get(connex_components) == 0);
    FOR_EACH(s_, 0, scn->nsegs) {
      const struct segment_in* seg_in =
        darray_segment_in_cdata_get(&scn->segments_in) + s_;
      const struct side_range* media_use
        = darray_side_range_cdata_get(&scn->media_use);
      FOR_EACH(s, 0, 2) {
        const side_id_t side = SEGIDxSIDE_2_SEGSIDE(s_, s);
        medium_id_t medium = seg_in->medium[s];
        m_idx = medium_id_2_medium_idx(medium);
        ASSERT(media_use[m_idx].first <= side && side
          <= media_use[m_idx].last);
      }
    }
  } /* Implicit barrier here */
#endif
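  /* A connex component is a set of segment sides grown by a flood fill:
   * starting from an unprocessed side of the current medium, we repeatedly
   * follow the facing_side_id links of both segment ends (set up in
   * collect_and_link_neighbours), stacking newly discovered sides until the
   * stack empties. A component that touches a medium with a lower index is
   * canceled and left to the thread owning that medium. E.g. for a closed
   * square made of 4 segments, the 4 inner sides form one component and the
   * 4 outer sides another. */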
  /* We loop on sides to build connex components. */
  #pragma omp for schedule(dynamic) nowait
  /* Process all media, including undef */
  for(m_idx = 0; m_idx < 1 + (int64_t)scn->next_medium_idx; m_idx++) {
    const medium_id_t medium = medium_idx_2_medium_id(m_idx);
    /* media_use 0 is for SENC2D_UNSPECIFIED_MEDIUM, n+1 is for n */
    const struct side_range* media_use =
      darray_side_range_cdata_get(&scn->media_use) + m_idx;
    /* Any not-already-used side is used as a starting point */
    side_id_t first_side_not_in_component = media_use->first;
    double max_ny;
    side_id_t max_ny_side_id;
    const side_id_t last_side = media_use->last;
    int component_canceled = 0, max_y_is_2sided = 0, fst_ny = 1;
    side_id_t cc_start_side_id = SIDE_NULL__;
    side_id_t cc_last_side_id = SIDE_NULL__;
    res_T tmp_res = RES_OK;
    ATOMIC id;

    if(*p_res != RES_OK) continue;
    if(first_side_not_in_component == SIDE_NULL__)
      continue; /* Unused medium */
    ASSERT(first_side_not_in_component < 2 * scn->nsegs);
    ASSERT(darray_side_id_size_get(&stack) == 0);
    ASSERT(darray_side_id_size_get(&current_component) == 0);
    for(;;) { /* Process all components for this medium */
      side_id_t crt_side_id = get_side_not_in_connex_component
        (last_side, segsides, processed, &first_side_not_in_component, medium);
      vrtx_id_t max_y_vrtx_id = VRTX_NULL__;
      struct cc_descriptor *cc;
      double max_y = -DBL_MAX;
      component_canceled = 0;
      ASSERT(crt_side_id == SIDE_NULL__ || crt_side_id < 2 * scn->nsegs);
      darray_side_id_clear(&current_component);

      if(*p_res != RES_OK) break;
      if(crt_side_id == SIDE_NULL__)
        break; /* crt_side_id == SIDE_NULL__ => no more components for this
                * medium */

      if(cc_start_side_id == SIDE_NULL__) {
        cc_start_side_id = cc_last_side_id = crt_side_id;
      } else {
        cc_start_side_id = MMIN(cc_start_side_id, crt_side_id);
        cc_last_side_id = MMAX(cc_last_side_id, crt_side_id);
      }

#ifndef NDEBUG
      {
        seg_id_t sid = SEGSIDE_2_SEG(crt_side_id);
        enum senc2d_side s = SEGSIDE_2_SIDE(crt_side_id);
        medium_id_t side_med
          = darray_segment_in_data_get(&scn->segments_in)[sid].medium[s];
        ASSERT(side_med == medium);
      }
#endif

      /* Reuse the media array if possible, or create a new one */
      if(current_media) {
        /* current_media 0 is for SENC2D_UNSPECIFIED_MEDIUM, n+1 is for n */
        memset(current_media, 0, 1 + scn->next_medium_idx);
      } else {
        current_media = MEM_CALLOC(alloc, 1 + scn->next_medium_idx,
          sizeof(*current_media));
        if(!current_media) {
          *p_res = RES_MEM_ERR;
          continue;
        }
      }
      current_media[m_idx] = 1;
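      /* current_media is a per-medium boolean table: slot 0 stands for
       * SENC2D_UNSPECIFIED_MEDIUM and slot n+1 for medium n. It records
       * every medium encountered along the component; its ownership is
       * later transferred to the component descriptor and the tables are
       * merged per enclosure (see bool_array_of_media_merge in
       * group_connex_components). */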
      for(;;) { /* Process all sides of this component */
        int i;
        enum side_flag crt_side_flag = SEGSIDE_2_SIDEFLAG(crt_side_id);
        struct segside* crt_side = segsides + crt_side_id;
        const seg_id_t crt_seg_id = SEGSIDE_2_SEG(crt_side_id);
        uchar* seg_used = processed + crt_seg_id;
        const struct segment_tmp* const seg_tmp = segments_tmp + crt_seg_id;
        ASSERT(crt_seg_id < scn->nsegs);

        if(*p_res != RES_OK) break;

        /* Record max_y information.
         * Keep track of the appropriate vertex of the component in order
         * to cast a ray at the component grouping step of the algorithm.
         * The most appropriate vertex is (the) one with the greatest Y
         * coordinate. */
        if(max_y < seg_tmp->max_y) {
          const struct segment_in* seg_in =
            darray_segment_in_cdata_get(&scn->segments_in) + crt_seg_id;
          /* New best vertex */
          max_y = seg_tmp->max_y;

          /* Select a vertex with y == max_y */
          FOR_EACH(i, 0, 2) {
            if(max_y == positions[seg_in->vertice_id[i]].pos.y) {
              max_y_vrtx_id = seg_in->vertice_id[i];
              break;
            }
          }
          ASSERT(i < 2); /* Found one */
        }

        /* Record crt_side both at component and segment level */
        if((*seg_used & crt_side_flag) == 0) {
          OK2(darray_side_id_push_back(&current_component, &crt_side_id));
          *seg_used = *seg_used | (uchar)crt_side_flag;
        }

        /* Store neighbour's sides in a waiting stack */
        FOR_EACH(i, 0, 2) {
          side_id_t neighbour_id = crt_side->facing_side_id[i];
          seg_id_t nbour_seg_id = SEGSIDE_2_SEG(neighbour_id);
          enum side_flag nbour_side_id = SEGSIDE_2_SIDEFLAG(neighbour_id);
          uchar* nbour_used = processed + nbour_seg_id;
          const struct segside* neighbour = segsides + neighbour_id;
          medium_id_t nbour_med_idx = medium_id_2_medium_idx(neighbour->medium);
          if((int64_t)nbour_med_idx < m_idx
            || (*nbour_used & SIDE_CANCELED_FLAG(nbour_side_id)))
          {
            /* 1) Not the same medium.
             * Neighbour's medium idx is less than the current medium: the
             * whole component is to be processed by another thread (possibly
             * the one associated with the neighbour's medium).
             * 2) Neighbour was canceled: no need to replay the component
             * again as it will eventually rediscover the side with low medium
             * id and recancel all the work in progress */
            component_canceled = 1;
            darray_side_id_clear(&stack);
            /* Don't cancel used flags as all these sides will get us back to
             * (at least) the neighbour side we have just discovered, which
             * will cancel them again and again */
            sz = darray_side_id_size_get(&current_component);
            FOR_EACH(ii, 0, sz) {
              side_id_t used_side
                = darray_side_id_cdata_get(&current_component)[ii];
              seg_id_t used_seg_id = SEGSIDE_2_SEG(used_side);
              enum side_flag used_side_flag
                = SEGSIDE_2_SIDEFLAG(used_side);
              uchar* used = processed + used_seg_id;
              ASSERT(*used & (uchar)used_side_flag);
              /* Set the used flag for sides in the cancelled component as
               * leading to further cancellations */
              *used |= SIDE_CANCELED_FLAG(used_side_flag);
            }

            goto canceled;
          }
          if(*nbour_used & nbour_side_id) continue; /* Already processed */
          /* Mark neighbour as processed and stack it */
          *nbour_used |= (uchar)nbour_side_id;
          OK2(darray_side_id_push_back(&stack, &neighbour_id));
          OK2(darray_side_id_push_back(&current_component, &neighbour_id));
          current_media[nbour_med_idx] = 1;
        }
        sz = darray_side_id_size_get(&stack);
        if(sz == 0) break; /* Empty stack => component is done! */
        crt_side_id = darray_side_id_cdata_get(&stack)[sz - 1];
        darray_side_id_pop_back(&stack);
        cc_start_side_id = MMIN(cc_start_side_id, crt_side_id);
        cc_last_side_id = MMAX(cc_last_side_id, crt_side_id);
      }
canceled:
      if(component_canceled) continue;

      /* Register the new component and get it initialized */
      cc = MEM_ALLOC(alloc, sizeof(struct cc_descriptor));
      if(!cc) *p_res = RES_MEM_ERR;
      if(*p_res != RES_OK) break;

      ASSERT(max_y_vrtx_id != VRTX_NULL__);
      cc_descriptor_init(alloc, cc);
      id = ATOMIC_INCR(component_count) - 1;
      ASSERT(id <= COMPONENT_MAX__);
      cc->cc_id = (component_id_t)id;
      sz = darray_side_id_size_get(&current_component);
      ASSERT(sz > 0 && sz <= SIDE_MAX__);
      cc->side_count = (side_id_t)sz;
      cc->side_range.first = cc_start_side_id;
      cc->side_range.last = cc_last_side_id;
      cc->max_y_vrtx_id = max_y_vrtx_id;
      /* Transfer ownership of the array to the component */
      ASSERT(!cc->media && current_media);
      cc->media = current_media;
      current_media = NULL;

      /* Reset for the next component */
      cc_start_side_id = SIDE_NULL__;
      cc_last_side_id = SIDE_NULL__;

      /* Write component membership in the global structure.
       * No need for sync here as a unique thread writes a given side */
      {STATIC_ASSERT(sizeof(cc->cc_id) >= 4, Cannot_write_IDs_sync_free);}
      ASSERT(IS_ALIGNED(segments_comp->component, sizeof(cc->cc_id)));
      FOR_EACH(ii, 0, sz) {
        const side_id_t s_id = darray_side_id_cdata_get(&current_component)[ii];
        seg_id_t seg_id = SEGSIDE_2_SEG(s_id);
        enum senc2d_side side = SEGSIDE_2_SIDE(s_id);
        component_id_t* cmp = segments_comp[seg_id].component;
        ASSERT(cmp[side] == COMPONENT_NULL__);
        ASSERT(medium_id_2_medium_idx(segsides[s_id].medium)
          >= medium_id_2_medium_idx(medium));
        cmp[side] = cc->cc_id;
      }

      /* Compute the component area and volume, and record information on the
       * max_y side of the component to help find out if the component is
       * inner or outer */
      fst_ny = 1;
      max_ny = 0;
      max_ny_side_id = SIDE_NULL__;
      FOR_EACH(ii, 0, sz) {
        const side_id_t s_id = darray_side_id_cdata_get(&current_component)[ii];
        const seg_id_t seg_id = SEGSIDE_2_SEG(s_id);
        enum senc2d_side side = SEGSIDE_2_SIDE(s_id);
        const struct segment_comp* seg_comp = segments_comp + seg_id;
        const struct segment_tmp* const seg_tmp = segments_tmp + seg_id;
        const struct segment_in* seg_in =
          darray_segment_in_cdata_get(&scn->segments_in) + seg_id;
        const union double2* vertices =
          darray_position_cdata_get(&scn->vertices);
        double edge[2], normal[2], norm;
        const double* v0 = vertices[seg_in->vertice_id[0]].vec;
        const double* v1 = vertices[seg_in->vertice_id[1]].vec;
        int is_2sided = (seg_comp->component[SENC2D_FRONT]
          == seg_comp->component[SENC2D_BACK]);

        /* Compute the component area and volume */
        d2_sub(edge, v1, v0);
        d2(normal, -edge[1], +edge[0]);
        norm = d2_normalize(normal, normal);
        ASSERT(norm);
        /* Area is an n-dimensional concept that in 2D is expressed in m.
         * Here the area contribution of the segment is its length */
        cc->area += norm;

        if(!is_2sided) {
          /* Build a triangle whose base is the segment and whose apex is
           * the coordinate system's origin. If the 2 sides of the segment
           * are part of the component, the contribution of the segment
           * must be 0. To achieve this, we just skip both sides */
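          /* Signed 2D variant of the shoelace formula, assuming
           * d2_cross(a, b) = a.x*b.y - a.y*b.x: the triangle (O, v0, v1)
           * contributes 2*area = d2_cross(v0, v1). Over a closed CCW
           * contour these terms sum to twice the enclosed surface; e.g.
           * for the unit square (0,0)(1,0)(1,1)(0,1) the four terms are
           * 0 + 1 + 1 + 0 = 2, i.e. an area of 1. */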
          double _2t = d2_cross(v0, v1); /* 2 * area of the triangle */

          /* Volume is an n-dimensional concept that in 2D is expressed in
           * m^2. Here the volume contribution of the segment is the area of
           * the triangle (v0, v1, O) */
          if((seg_comp->component[SENC2D_FRONT] == cc->cc_id)
            == (scn->convention & SENC2D_CONVENTION_NORMAL_FRONT))
            cc->_2volume += _2t;
          else
            cc->_2volume -= _2t;
        }

        ASSERT(seg_comp->component[side] != COMPONENT_NULL__); (void)side;
        if(seg_tmp->max_y == max_y) {
          int i;
          /* Candidate to define the max_ny (segment using max_y_vrtx) */
          FOR_EACH(i, 0, 2) {
            if(cc->max_y_vrtx_id == seg_in->vertice_id[i]) {
              if(fst_ny || fabs(normal[1]) > fabs(max_ny)) {
                max_ny_side_id = s_id;
                max_ny = normal[1];
                max_y_is_2sided = is_2sided;
                fst_ny = 0;
                break;
              }
            }
          }
        }
      }
      ASSERT(!fst_ny);
      /* Determine if this component can be an inner part inside another
       * component (subtracting a volume), as only these components will need
       * to search for their possible outer component when grouping
       * components to create enclosures.
       * The inner/outer property comes from the normal orientation of the
       * segment on top of the component, this segment being the one whose
       * |Ny| is maximal. If this segment has its 2 sides in the component,
       * the component is inner */
      if(max_ny == 0 || max_y_is_2sided) cc->is_outer_border = 0;
      else {
        ASSERT(max_ny_side_id != SIDE_NULL__);
        if(SEGSIDE_IS_FRONT(max_ny_side_id)
          == ((scn->convention & SENC2D_CONVENTION_NORMAL_FRONT) != 0)) {
          /* Geom normal points towards the component */
          cc->is_outer_border = (max_ny < 0);
        } else {
          /* Geom normal points away from the component */
          cc->is_outer_border = (max_ny > 0);
        }
      }

      /* Need to synchronize connex_components growth as this global structure
       * is accessed by multiple threads */
      #pragma omp critical
      {
        struct cc_descriptor** components;
        sz = darray_ptr_component_descriptor_size_get(connex_components);
        if(sz <= cc->cc_id) {
          tmp_res = darray_ptr_component_descriptor_resize(connex_components,
            1 + cc->cc_id);
          if(tmp_res != RES_OK) *p_res = tmp_res;
        }
        if(*p_res == RES_OK) {
          /* Don't fetch the pointer before the resize, as resizing can move
           * the data */
          components =
            darray_ptr_component_descriptor_data_get(connex_components);
          ASSERT(components[cc->cc_id] == NULL);
          components[cc->cc_id] = cc;
        }
      }
    }
tmp_error:
    if(tmp_res != RES_OK) *p_res = tmp_res;
  } /* No barrier here */

  MEM_RM(alloc, processed);
  MEM_RM(alloc, current_media);
  darray_side_id_release(&current_component);
  darray_side_id_release(&stack);

  /* The first thread here creates the s2d view */
  #pragma omp single nowait
  if(*p_res == RES_OK) {
    res_T res = RES_OK;
    struct s2d_device* s2d = NULL;
    struct s2d_scene* s2d_scn = NULL;
    struct s2d_shape* s2d_shp = NULL;
    struct s2d_vertex_data attribs;

    attribs.type = S2D_FLOAT2;
    attribs.usage = S2D_POSITION;
    attribs.get = get_scn_position;

    /* Put the geometry in a 2D view */
    OK(s2d_device_create(scn->dev->logger, alloc, 0, &s2d));
    OK(s2d_scene_create(s2d, &s2d_scn));
    OK(s2d_shape_create_line_segments(s2d, &s2d_shp));

    /* Back to s2d API type for nsegs and nverts */
    ASSERT(scn->nsegs <= UINT_MAX);
    ASSERT(scn->nverts <= UINT_MAX);
    OK(s2d_line_segments_setup_indexed_vertices(s2d_shp,
      (unsigned)scn->nsegs, get_scn_indices,
      (unsigned)scn->nverts, &attribs, 1, scn));
    s2d_line_segments_set_hit_filter_function(s2d_shp, self_hit_filter,
      segments_comp_array);
    OK(s2d_scene_attach_shape(s2d_scn, s2d_shp));
    OK(s2d_scene_view_create(s2d_scn, S2D_TRACE, s2d_view));
error:
    if(res != RES_OK) *p_res = res;
    if(s2d) S2D(device_ref_put(s2d));
    if(s2d_scn) S2D(scene_ref_put(s2d_scn));
    if(s2d_shp) S2D(shape_ref_put(s2d_shp));
  }

#ifndef NDEBUG
  /* Need to wait for all threads to be done before checking */
  #pragma omp barrier
  if(*p_res != RES_OK) return;
  #pragma omp single
  {
    seg_id_t s_;
    component_id_t c;
    ASSERT(ATOMIC_GET(component_count) ==
      (int)darray_ptr_component_descriptor_size_get(connex_components));
    FOR_EACH(s_, 0, scn->nsegs) {
      struct segment_comp* seg_comp =
        darray_segment_comp_data_get(segments_comp_array) + s_;
      ASSERT(seg_comp->component[SENC2D_FRONT] != COMPONENT_NULL__);
      ASSERT(seg_comp->component[SENC2D_BACK] != COMPONENT_NULL__);
    }
    FOR_EACH(c, 0, (component_id_t)ATOMIC_GET(component_count)) {
      struct cc_descriptor** components =
        darray_ptr_component_descriptor_data_get(connex_components);
      ASSERT(components[c] != NULL && components[c]->cc_id == c);
    }
  } /* Implicit barrier here */
#endif
}
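/* Illustration of the grouping performed below: a disc D inside a box B.
 *
 *     +-----------+
 *     |    ___    |    A +Y ray cast from the topmost vertex of D's outer
 *     |   / D \   |    border hits the inner side of B, so both components
 *     |   \___/   |    are grouped into the same enclosure: the cavity
 *     +-----------+    between D and B. Outer borders start their own
 *                      group, and a component whose ray escapes faces the
 *                      infinite medium (enclosure 0).
 */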
static void
group_connex_components
  (struct senc2d_scene* scn,
   struct darray_segment_comp* segments_comp,
   struct darray_ptr_component_descriptor* connex_components,
   struct s2d_scene_view* s2d_view,
   ATOMIC* next_enclosure_id,
   /* Shared error status.
    * We accept overwriting an error with a different error */
   res_T* res)
{
  /* This function is called from an omp parallel block and executed
   * concurrently. */
  struct cc_descriptor** descriptors;
  const union double2* positions;
  size_t tmp;
  component_id_t cc_count;
  int64_t ccc;

  ASSERT(scn && segments_comp && connex_components && s2d_view
    && next_enclosure_id && res);
  ASSERT(scn->analyze.enclosures_count == 1);

  descriptors = darray_ptr_component_descriptor_data_get(connex_components);
  tmp = darray_ptr_component_descriptor_size_get(connex_components);
  ASSERT(tmp <= COMPONENT_MAX__);
  cc_count = (component_id_t)tmp;
  positions = darray_position_cdata_get(&scn->vertices);

  /* Cast rays to find links between connex components */
  #pragma omp for schedule(dynamic)
  for(ccc = 0; ccc < (int64_t)cc_count; ccc++) {
    res_T tmp_res = RES_OK;
    component_id_t c = (component_id_t)ccc;
    struct s2d_hit hit = S2D_HIT_NULL;
    float origin[2];
    const float dir[2] = { 0, 1 };
    const float range[2] = { 0, FLT_MAX };
    struct cc_descriptor* const cc = descriptors[c];
    component_id_t self_hit_component = cc->cc_id;
    const double* max_vrtx;

    if(*res != RES_OK) continue;
    ASSERT(cc->cc_id == c);
    ASSERT(cc->cc_group_root == CC_GROUP_ID_NONE);
    ASSERT(cc->max_y_vrtx_id < scn->nverts);

    max_vrtx = positions[cc->max_y_vrtx_id].vec;
    if(cc->is_outer_border) {
      ATOMIC id;
      /* No need to cast a ray */
      cc->cc_group_root = cc->cc_id; /* New group with self as root */
      id = ATOMIC_INCR(next_enclosure_id) - 1;
      ASSERT(id <= ENCLOSURE_MAX__);
      cc->enclosure_id = (enclosure_id_t)id;
      continue;
    }

    f2_set_d2(origin, max_vrtx);
    /* Self-hit data: it is a self-hit if the ray hits this component "on the
     * other side" */
    tmp_res = s2d_scene_view_trace_ray(s2d_view, origin, dir, range,
      &self_hit_component, &hit);
    if(tmp_res != RES_OK) {
      *res = tmp_res;
      continue;
    }
    /* If no hit, the component is facing an infinite medium */
    if(S2D_HIT_NONE(&hit)) {
      cc->cc_group_root = CC_GROUP_ROOT_INFINITE;
      cc->enclosure_id = 0;
    } else {
      /* If hit, group this component */
      const seg_id_t hit_seg_id = (seg_id_t)hit.prim.prim_id;
      const struct segment_comp* hit_seg_comp =
        darray_segment_comp_cdata_get(segments_comp) + hit_seg_id;
      enum senc2d_side hit_side =
        ((hit.normal[1] < 0) /* Facing geometrical normal of hit */
          == ((scn->convention & SENC2D_CONVENTION_NORMAL_FRONT) != 0))
        /* Warning: following the Embree 2 convention for geometrical normals,
         * the Star2D hit normal is left-handed while star-enclosure uses the
         * right-handed convention */
        ? SENC2D_BACK : SENC2D_FRONT;
      ASSERT(hit.normal[1] != 0);
      ASSERT(hit_seg_id < scn->nsegs);

      /* Not really the root until following links */
      cc->cc_group_root = hit_seg_comp->component[hit_side];
      ASSERT(cc->cc_group_root < cc_count);
    }
  }
  /* Implicit barrier here */
  if(*res != RES_OK) return;

  /* One thread post-processes links to group connex components */
  #pragma omp single
  {
    res_T tmp_res = RES_OK;
    size_t ec = (size_t)ATOMIC_GET(next_enclosure_id);
    ASSERT(ec <= ENCLOSURE_MAX__);
    scn->analyze.enclosures_count = (enclosure_id_t)ec;
    tmp_res = darray_enclosure_resize(&scn->analyze.enclosures,
      scn->analyze.enclosures_count);
    if(tmp_res != RES_OK) {
      *res = tmp_res;
    } else {
      struct enclosure_data* enclosures
        = darray_enclosure_data_get(&scn->analyze.enclosures);
      FOR_EACH(ccc, 0, (int64_t)cc_count) {
        component_id_t c = (component_id_t)ccc;
        struct cc_descriptor* const cc = descriptors[c];
        const struct cc_descriptor* other_desc = cc;
        struct enclosure_data* enc;
#ifndef NDEBUG
        component_id_t cc_cpt = 0;
#endif

        while(other_desc->enclosure_id == CC_GROUP_ID_NONE) {
          ASSERT(other_desc->cc_group_root < cc_count);
          other_desc = descriptors[other_desc->cc_group_root];
          /* Cannot have more components in cc than cc_count! */
          ASSERT(++cc_cpt <= cc_count);
        }
        ASSERT(other_desc->cc_group_root != CC_GROUP_ROOT_NONE);
        ASSERT(other_desc->enclosure_id != CC_GROUP_ID_NONE);
        cc->cc_group_root = other_desc->cc_group_root;
        cc->enclosure_id = other_desc->enclosure_id;
        enc = enclosures + cc->enclosure_id;
        ++enc->cc_count;
        /* Linked list of components */
        enc->first_component = cc->cc_id;
        enc->side_range.first = MMIN(enc->side_range.first, cc->side_range.first);
        enc->side_range.last = MMAX(enc->side_range.last, cc->side_range.last);
        enc->side_count += cc->side_count;
        tmp_res = bool_array_of_media_merge(&enc->tmp_enclosed_media, cc->media,
          scn->next_medium_idx + 1);
        if(tmp_res != RES_OK) {
          *res = tmp_res;
          break;
        }
      }
    }
  }
  /* Implicit barrier here */
}
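/* Illustration of the side linking performed below. With three segments a,
 * b, c sharing a vertex, the neighbour infos are sorted by the angle of
 * their outgoing edge; each pair of consecutive edges in this circular
 * order bounds an angular sector, and the two sides facing that sector are
 * linked to each other:
 *
 *      b
 *      |     sectors (a,b), (b,c), (c,a): the side of a facing b is
 *  a---+     linked with the side of b facing a, and so on around the
 *      |     vertex, so walking facing_side_id links stays within the
 *      c     region bounded by the segments.
 */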
static void
collect_and_link_neighbours
  (struct senc2d_scene* scn,
   struct segside* segsides,
   struct darray_segment_tmp* segments_tmp_array,
   struct darray_frontier_vertex* frontiers,
   struct htable_overlap* overlaps,
   /* Shared error status.
    * We accept overwriting an error with a different error */
   res_T* res)
{
  /* This function is called from an omp parallel block and executed
   * concurrently. */
  const struct segment_in* segments_in;
  struct segment_tmp* segments_tmp;
  const union double2* vertices;
  const int thread_count = omp_get_num_threads();
  const int rank = omp_get_thread_num();
  const int front = ((scn->convention & SENC2D_CONVENTION_NORMAL_FRONT) != 0);
  /* Array to keep the neighbourhood of vertices.
   * Resize/Push operations on neighbourhood_by_vertex are valid in the
   * openmp block because a given neighbourhood is only processed
   * by a single thread */
  struct darray_neighbourhood neighbourhood_by_vertex;
  vrtx_id_t v;
  seg_id_t s;
  res_T tmp_res;

  ASSERT(scn && segsides && segments_tmp_array && frontiers && res);
  ASSERT((size_t)scn->nverts + (size_t)scn->nsegs + 2 <= EDGE_MAX__);

  darray_neighbourhood_init(scn->dev->allocator, &neighbourhood_by_vertex);
  OK2(darray_neighbourhood_resize(&neighbourhood_by_vertex, scn->nverts));

  segments_in = darray_segment_in_cdata_get(&scn->segments_in);
  segments_tmp = darray_segment_tmp_data_get(segments_tmp_array);
  vertices = darray_position_cdata_get(&scn->vertices);

  ASSERT(scn->nsegs == darray_segment_tmp_size_get(segments_tmp_array));

  /* Loop on segments to collect edges' neighbours.
   * All threads consider all the vertices and process some of them */
  FOR_EACH(s, 0, scn->nsegs) {
    uchar vv;
    FOR_EACH(vv, 0, 2) {
      struct darray_neighbour* neighbourhood;
      struct neighbour_info* info;
      const vrtx_id_t vertex = segments_in[s].vertice_id[vv];
      size_t sz;
      /* Process only "my" vertices! */
      if((int64_t)vertex % thread_count != rank) continue;
      /* Find the neighbourhood */
      ASSERT(vertex < darray_neighbourhood_size_get(&neighbourhood_by_vertex));
      neighbourhood =
        darray_neighbourhood_data_get(&neighbourhood_by_vertex) + vertex;
      sz = darray_neighbour_size_get(neighbourhood);
      /* Make room for this neighbour */
      if(darray_neighbour_capacity(neighbourhood) == sz) {
        /* 2 seems to be a good guess for the initial capacity */
        size_t new_sz = sz ? sz + 1 : 2;
        tmp_res = darray_neighbour_reserve(neighbourhood, new_sz);
        if(tmp_res != RES_OK) {
          *res = tmp_res;
          return;
        }
      }
      tmp_res = darray_neighbour_resize(neighbourhood, 1 + sz);
      if(tmp_res != RES_OK) {
        *res = tmp_res;
        return;
      }
      /* Add the neighbour info to the vertex's neighbour list */
      info = darray_neighbour_data_get(neighbourhood) + sz;
      info->seg_id = s;
      info->common_vertex_rank = vv;
    }
  }
  /* When a thread has built its share of neighbourhoods
   * it can process them without waiting for the other threads
   * (no barrier needed here). */

  if(*res != RES_OK) return;

  /* For each of "my" vertices, sort segment sides by rotation angle
   * and connect neighbours.
   * All threads consider all the vertices and process some of them */
  FOR_EACH(v, 0, scn->nverts) {
    const vrtx_id_t common_vrtx = v;
    vrtx_id_t other_vrtx;
    struct darray_neighbour* neighbourhood;
    side_id_t i, neighbour_count;
    double a;
    size_t sz;
    /* Process only "my" neighbourhoods! */
    if((int64_t)v % thread_count != rank) continue;
    neighbourhood
      = darray_neighbourhood_data_get(&neighbourhood_by_vertex) + v;
    sz = darray_neighbour_size_get(neighbourhood);
    /* sz can be 0 as a vertex can be unused */
    if(!sz) continue;
    ASSERT(sz <= SIDE_MAX__);
    neighbour_count = (side_id_t)sz;
    FOR_EACH(i, 0, neighbour_count) {
      double max_y, disp[2];
      struct neighbour_info* neighbour_info
        = darray_neighbour_data_get(neighbourhood) + i;
      const struct segment_in* seg_in = segments_in + neighbour_info->seg_id;
      struct segment_tmp* neighbour = segments_tmp + neighbour_info->seg_id;
      union double2 n; /* Geometrical normal to the neighbour segment */
      const int is_reversed = neighbour_info->common_vertex_rank;

      other_vrtx =
        seg_in->vertice_id[(neighbour_info->common_vertex_rank + 1) % 2];
      max_y = MMAX(vertices[other_vrtx].pos.y, vertices[common_vrtx].pos.y);
      ASSERT(neighbour->max_y <= max_y);
      neighbour->max_y = max_y;
      /* Compute the rotation angle around the common vertex (in world
       * system) */
      d2_sub(disp, vertices[other_vrtx].vec, vertices[common_vrtx].vec);
      ASSERT(disp[0] || disp[1]);
      neighbour_info->angle = atan2(disp[1], disp[0]); /* in ]-pi, +pi] */
      if(is_reversed)
        d2(n.vec, +disp[1], -disp[0]);
      else
        d2(n.vec, -disp[1], +disp[0]);

      /* Normal orientation calculation. */
      if(neighbour_info->angle > 3 * PI / 4) {
        ASSERT(n.pos.y);
        neighbour_info->normal_toward_next_neighbour = (n.pos.y < 0);
      } else if(neighbour_info->angle > PI / 4) {
        ASSERT(n.pos.x);
        neighbour_info->normal_toward_next_neighbour = (n.pos.x < 0);
      } else if(neighbour_info->angle > -PI / 4) {
        ASSERT(n.pos.y);
        neighbour_info->normal_toward_next_neighbour = (n.pos.y > 0);
      } else if(neighbour_info->angle > -3 * PI / 4) {
        ASSERT(n.pos.x);
        neighbour_info->normal_toward_next_neighbour = (n.pos.x > 0);
      } else {
        ASSERT(n.pos.y);
        neighbour_info->normal_toward_next_neighbour = (n.pos.y < 0);
      }
    }
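    /* The quadrant tests above read whichever component of n is guaranteed
     * to be nonzero for that edge direction (the dominant one of |disp.x|
     * and |disp.y|), avoiding a sign test on a component that may be 0.
     * The stored flag says whether the side normal points counterclockwise
     * around the common vertex, i.e. toward the next neighbour in the
     * sorted order. */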
    /* Sort segments by rotation angle */
    qsort(darray_neighbour_data_get(neighbourhood), neighbour_count,
      sizeof(struct neighbour_info), neighbour_cmp);
    /* Link sides.
     * Create cycles of sides by neighbourhood around the common vertex. */
    a = -DBL_MAX;
    FOR_EACH(i, 0, neighbour_count) {
      /* Neighbourhood info for the current pair of segments */
      const struct neighbour_info* current
        = darray_neighbour_cdata_get(neighbourhood) + i;
      const struct neighbour_info* ccw_neighbour
        = darray_neighbour_cdata_get(neighbourhood) + (i + 1) % neighbour_count;
      /* Rank of the end of interest in segments */
      const uchar crt_end = current->common_vertex_rank;
      /* Here ccw refers to the rotation around the common vertex
       * and has nothing to do with the vertices' order in the segment
       * definition nor with the Front/Back side convention */
      const uchar ccw_end = ccw_neighbour->common_vertex_rank;
      /* User id of the current segments */
      const seg_id_t crt_id = current->seg_id;
      const seg_id_t ccw_id = ccw_neighbour->seg_id;
      /* Facing sides of segments */
      const enum senc2d_side crt_side
        = (current->normal_toward_next_neighbour == front)
        ? SENC2D_FRONT : SENC2D_BACK;
      const enum senc2d_side ccw_side
        = (ccw_neighbour->normal_toward_next_neighbour == front)
        ? SENC2D_BACK : SENC2D_FRONT;
      /* Index of sides in segsides */
      const side_id_t crt_side_idx = SEGIDxSIDE_2_SEGSIDE(crt_id, crt_side);
      const side_id_t ccw_side_idx = SEGIDxSIDE_2_SEGSIDE(ccw_id, ccw_side);
      /* Side ptrs */
      struct segside* const p_crt_side = segsides + crt_side_idx;
      struct segside* const p_ccw_side = segsides + ccw_side_idx;
      /* Check that the angle is a discriminant property */
      ASSERT(a <= current->angle); /* Is sorted */
      if(a == current->angle) {
        /* Two consecutive segments with the same angle! Store them */
        const struct neighbour_info* previous;
        seg_id_t prev_id;
        previous = darray_neighbour_cdata_get(neighbourhood) + i - 1;
        prev_id = previous->seg_id;
        #pragma omp critical
        {
          char one = 1;
          tmp_res = htable_overlap_set(overlaps, &crt_id, &one);
          if(tmp_res == RES_OK)
            tmp_res = htable_overlap_set(overlaps, &prev_id, &one);
        }
        if(tmp_res != RES_OK) goto tmp_error;
      }
      a = current->angle;
      /* Link sides */
      ASSERT(p_crt_side->facing_side_id[crt_end] == SIDE_NULL__);
      ASSERT(p_ccw_side->facing_side_id[ccw_end] == SIDE_NULL__);
      p_crt_side->facing_side_id[crt_end] = ccw_side_idx;
      p_ccw_side->facing_side_id[ccw_end] = crt_side_idx;
      /* Record media */
      ASSERT(p_crt_side->medium == MEDIUM_NULL__
        || p_crt_side->medium == segments_in[crt_id].medium[crt_side]);
      ASSERT(p_ccw_side->medium == MEDIUM_NULL__
        || p_ccw_side->medium == segments_in[ccw_id].medium[ccw_side]);
      p_crt_side->medium = segments_in[crt_id].medium[crt_side];
      p_ccw_side->medium = segments_in[ccw_id].medium[ccw_side];
      ASSERT(p_crt_side->medium == SENC2D_UNSPECIFIED_MEDIUM
        || p_crt_side->medium < scn->next_medium_idx);
      ASSERT(p_ccw_side->medium == SENC2D_UNSPECIFIED_MEDIUM
        || p_ccw_side->medium < scn->next_medium_idx);
      /* Detect segments that could surround a hole:
       * - single segment on (one of) its ends
       * - different media on its sides */
      if(neighbour_count == 1
        && p_crt_side->medium != p_ccw_side->medium)
        #pragma omp critical
        {
          struct frontier_vertex frontier_vertex;
          frontier_vertex.seg = crt_id;
          frontier_vertex.vrtx = v;
          darray_frontier_vertex_push_back(frontiers, &frontier_vertex);
        }
    }
  }

tmp_error:
  if(tmp_res != RES_OK) *res = tmp_res;
  /* Threads are allowed to return without sync. */
  darray_neighbourhood_release(&neighbourhood_by_vertex);
}

static int
compare_enclosures
  (const void* ptr1, const void* ptr2)
{
  const struct enclosure_data* e1 = ptr1;
  const struct enclosure_data* e2 = ptr2;
  ASSERT(e1->side_range.first != e2->side_range.first);
  return (int)e1->side_range.first - (int)e2->side_range.first;
}

static void
build_result
  (struct senc2d_scene* scn,
   const struct darray_ptr_component_descriptor* connex_components,
   const struct darray_segment_comp* segments_comp_array,
   struct darray_frontier_vertex* frontiers,
   enclosure_id_t* ordered_ids,
   /* Shared error status.
    * We accept overwriting an error with a different error */
   res_T* res)
{
  /* This function is called from an omp parallel block and executed
   * concurrently. */
  struct mem_allocator* alloc;
  struct cc_descriptor* const* cc_descriptors;
  struct enclosure_data* enclosures;
  const struct segment_in* segments_in;
  struct segment_enc* segments_enc;
  const struct segment_comp* segments_comp;
  struct htable_vrtx_id vtable;
  int output_normal_in, normals_front, normals_back;
  size_t cc_count;
  int64_t sg;
  int64_t ee;

  ASSERT(scn && connex_components && segments_comp_array && frontiers && res);

  alloc = scn->dev->allocator;
  output_normal_in = (scn->convention & SENC2D_CONVENTION_NORMAL_INSIDE) != 0;
  normals_front = (scn->convention & SENC2D_CONVENTION_NORMAL_FRONT) != 0;
  normals_back = (scn->convention & SENC2D_CONVENTION_NORMAL_BACK) != 0;
  ASSERT(normals_back != normals_front);
  ASSERT(output_normal_in
    != ((scn->convention & SENC2D_CONVENTION_NORMAL_OUTSIDE) != 0));
  cc_count = darray_ptr_component_descriptor_size_get(connex_components);
  ASSERT(cc_count <= COMPONENT_MAX__);
  cc_descriptors = darray_ptr_component_descriptor_cdata_get(connex_components);
  enclosures = darray_enclosure_data_get(&scn->analyze.enclosures);
  segments_in = darray_segment_in_cdata_get(&scn->segments_in);
  segments_comp = darray_segment_comp_cdata_get(segments_comp_array);

  #pragma omp single
  {
    enclosure_id_t e;
    size_t d;
    res_T tmp_res =
      darray_segment_enc_resize(&scn->analyze.segments_enc, scn->nsegs);
    if(tmp_res != RES_OK) {
      *res = tmp_res;
      goto single_err;
    }
    /* Store the initial enclosure order */
    FOR_EACH(e, 0, scn->analyze.enclosures_count)
      enclosures[e].header.enclosure_id = e;
    /* Sort enclosures by first side while keeping enclosure 0
     * at rank 0 (it's a convention) */
    qsort(enclosures + 1, scn->analyze.enclosures_count - 1,
      sizeof(*enclosures), compare_enclosures);
    /* Make the conversion table */
    FOR_EACH(e, 0, scn->analyze.enclosures_count) {
      enclosure_id_t rank = enclosures[e].header.enclosure_id;
      ordered_ids[rank] = e;
    }
    FOR_EACH(d, 0, cc_count) {
      enclosure_id_t new_id = ordered_ids[cc_descriptors[d]->enclosure_id];
      /* Update the enclosure ID in cc_descriptor */
      cc_descriptors[d]->enclosure_id = new_id;
      /* Sum up areas and volumes of components into enclosures */
      enclosures[new_id].header.area += cc_descriptors[d]->area;
      enclosures[new_id].header.volume += cc_descriptors[d]->_2volume / 2;
    }
single_err: (void)0;
  } /* Implicit barrier here. */
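  /* Example of the remapping above: with three enclosures whose first sides
   * are 0, 50 and 10, qsort reorders them to {old 0, old 2, old 1}, hence
   * ordered_ids == {0, 2, 1}: a component that referred to enclosure 1 now
   * points to rank 2. */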
  if(*res != RES_OK) goto exit;
  segments_enc = darray_segment_enc_data_get(&scn->analyze.segments_enc);

  /* Build global enclosure information */
  #pragma omp for
  for(sg = 0; sg < (int64_t)scn->nsegs; sg++) {
    seg_id_t s = (seg_id_t)sg;
    const component_id_t cf_id = segments_comp[s].component[SENC2D_FRONT];
    const component_id_t cb_id = segments_comp[s].component[SENC2D_BACK];
    const struct cc_descriptor* cf = cc_descriptors[cf_id];
    const struct cc_descriptor* cb = cc_descriptors[cb_id];
    const enclosure_id_t ef_id = cf->enclosure_id;
    const enclosure_id_t eb_id = cb->enclosure_id;
    ASSERT(segments_enc[s].enclosure[SENC2D_FRONT] == ENCLOSURE_NULL__);
    segments_enc[s].enclosure[SENC2D_FRONT] = ef_id;
    ASSERT(segments_enc[s].enclosure[SENC2D_BACK] == ENCLOSURE_NULL__);
    segments_enc[s].enclosure[SENC2D_BACK] = eb_id;
  }
  /* Implicit barrier here */

  /* Resize/push operations on the enclosure's fields are valid in the
   * openmp block as a given enclosure is processed by a single thread */
  htable_vrtx_id_init(alloc, &vtable);

  ASSERT(scn->analyze.enclosures_count <= ENCLOSURE_MAX__);
  #pragma omp for schedule(dynamic) nowait
  for(ee = 0; ee < (int64_t)scn->analyze.enclosures_count; ee++) {
    const enclosure_id_t e = (enclosure_id_t)ee;
    struct enclosure_data* enc = enclosures + e;
    seg_id_t fst_idx = 0;
    seg_id_t sgd_idx = enc->side_count;
    seg_id_t s;
    medium_id_t m;
    res_T tmp_res = RES_OK;
    ASSERT(enc->first_component < cc_count);
    ASSERT(cc_descriptors[enc->first_component]->cc_id
      == enc->first_component);

    if(*res != RES_OK) continue;
    ASSERT(e <= ENCLOSURE_MAX__);
    enc->header.enclosure_id = (unsigned)ee; /* Back to API type */
    ASSERT(cc_descriptors[enc->first_component]->enclosure_id
      == enc->header.enclosure_id);
    enc->header.is_infinite = (e == 0);

    ASSERT(enc->header.enclosed_media_count < 1 + scn->next_medium_idx);
    OK2(bool_array_of_media_to_darray_media
      (&enc->enclosed_media, &enc->tmp_enclosed_media, scn->next_medium_idx));
    ASSERT(darray_media_size_get(&enc->enclosed_media) <= MEDIUM_MAX__);
    enc->header.enclosed_media_count
      = (medium_id_t)darray_media_size_get(&enc->enclosed_media);
    darray_uchar_purge(&enc->tmp_enclosed_media);

    /* Add this enclosure in the relevant by-medium lists */
    FOR_EACH(m, 0, enc->header.enclosed_media_count) {
      medium_id_t medium = darray_media_cdata_get(&enc->enclosed_media)[m];
      size_t m_idx = medium_id_2_medium_idx(medium);
      struct darray_enc_id* enc_ids_array_by_medium;
      ASSERT(medium == SENC2D_UNSPECIFIED_MEDIUM || medium < scn->next_medium_idx);
      ASSERT(darray_enc_ids_array_size_get(&scn->analyze.enc_ids_array_by_medium)
        == 1 + scn->next_medium_idx);
      enc_ids_array_by_medium =
        darray_enc_ids_array_data_get(&scn->analyze.enc_ids_array_by_medium) + m_idx;
      #pragma omp critical
      {
        tmp_res = darray_enc_id_push_back(enc_ids_array_by_medium, &e);
      }
      if(tmp_res != RES_OK) {
        *res = tmp_res;
        break;
      }
    }
    if(*res != RES_OK) continue;
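    /* The side list below is filled from both ends: fst_idx grows from 0
     * while sgd_idx shrinks from side_count, so a segment having both of
     * its sides in this enclosure stores its front side in the leading part
     * and its back side in the trailing part. E.g. with side_count == 4 and
     * one 2-sided segment s0: [F0, F1, F2 | B0]. */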
    /* Build side and vertex lists. */
    OK2(darray_sides_enc_resize(&enc->sides, enc->side_count));
    /* Size is just a hint */
    OK2(darray_vrtx_id_reserve(&enc->vertices,
      (size_t)(enc->side_count * 0.6)));
    /* New vertex numbering scheme local to the enclosure */
    htable_vrtx_id_clear(&vtable);
    ASSERT(scn->nsegs == darray_segment_in_size_get(&scn->segments_in));
    /* Put at the end the back-faces of segments that also have their
     * front-face in the list. */
    for(s = SEGSIDE_2_SEG(enc->side_range.first);
        s <= SEGSIDE_2_SEG(enc->side_range.last);
        s++)
    {
      const struct segment_in* seg_in = segments_in + s;
      struct side_enc* side_enc;
      vrtx_id_t vertice_id[2];
      int i;
      if(segments_enc[s].enclosure[SENC2D_FRONT] != e
        && segments_enc[s].enclosure[SENC2D_BACK] != e)
        continue;
      ++enc->header.unique_primitives_count;

      FOR_EACH(i, 0, 2) {
        vrtx_id_t* p_id = htable_vrtx_id_find(&vtable, seg_in->vertice_id + i);
        if(p_id) {
          vertice_id[i] = *p_id; /* Known vertex */
        } else {
          /* Create a new association */
          size_t tmp = htable_vrtx_id_size_get(&vtable);
          ASSERT(tmp == darray_vrtx_id_size_get(&enc->vertices));
          ASSERT(tmp < VRTX_MAX__);
          vertice_id[i] = (vrtx_id_t)tmp;
          OK2(htable_vrtx_id_set(&vtable, seg_in->vertice_id + i,
            vertice_id + i));
          OK2(darray_vrtx_id_push_back(&enc->vertices, seg_in->vertice_id + i));
          ++enc->header.vertices_count;
        }
      }
      ASSERT(segments_enc[s].enclosure[SENC2D_FRONT] == e
        || segments_enc[s].enclosure[SENC2D_BACK] == e);
      if(segments_enc[s].enclosure[SENC2D_FRONT] == e) {
        /* The front side of the original segment is a member of the
         * enclosure */
        int input_normal_in = normals_front;
        int revert_segment = (input_normal_in != output_normal_in);
        ++enc->header.primitives_count;
        side_enc = darray_sides_enc_data_get(&enc->sides) + fst_idx++;
        FOR_EACH(i, 0, 2) {
          int ii = revert_segment ? 1 - i : i;
          side_enc->vertice_id[i] = vertice_id[ii];
        }
        side_enc->side_id = SEGIDxSIDE_2_SEGSIDE(s, SENC2D_FRONT);
      }
      if(segments_enc[s].enclosure[SENC2D_BACK] == e) {
        /* The back side of the original segment is a member of the
         * enclosure */
        int input_normal_in = normals_back;
        int revert_segment = (input_normal_in != output_normal_in);
        ++enc->header.primitives_count;
        /* If both sides are in the enclosure, put the second side at the end */
        side_enc = darray_sides_enc_data_get(&enc->sides) +
          ((segments_enc[s].enclosure[SENC2D_FRONT] == e) ? --sgd_idx : fst_idx++);
        FOR_EACH(i, 0, 2) {
          int ii = revert_segment ? 1 - i : i;
          side_enc->vertice_id[i] = vertice_id[ii];
        }
        side_enc->side_id = SEGIDxSIDE_2_SEGSIDE(s, SENC2D_BACK);
      }
      if(fst_idx == sgd_idx) break;
    }
    continue;
tmp_error:
    ASSERT(tmp_res != RES_OK);
    *res = tmp_res;
  } /* No barrier here */
  htable_vrtx_id_release(&vtable);
  /* The first thread here copies frontiers into the descriptor */
  #pragma omp single nowait
  darray_frontier_vertex_copy_and_clear(&scn->analyze.frontiers, frontiers);
  /* No barrier here */
exit:
  return;
}

/******************************************************************************
 * Exported functions
 *****************************************************************************/
res_T
scene_analyze
  (struct senc2d_scene* scn)
{
  /* By-segment tmp data */
  struct darray_segment_tmp segments_tmp;
  char segments_tmp_initialized = 0;
  /* Array of connex components.
   * They are referred to by arrays of ids. */
  struct darray_ptr_component_descriptor connex_components;
  char connex_components_initialized = 0;
  /* Array of frontier vertices */
  struct darray_frontier_vertex frontiers;
  char frontiers_initialized = 0;
  /* Htable used to store overlapping segments */
  struct htable_overlap overlaps;
  char overlaps_initialized = 0;
  /* Store by-segment components */
  struct darray_segment_comp segments_comp;
  char segments_comp_initialized = 0;
  /* Array of segment sides. */
  struct segside* segsides = NULL;
  struct s2d_scene_view* s2d_view = NULL;
  /* Atomic counters shared between threads */
  ATOMIC component_count = 0;
  ATOMIC next_enclosure_id = 1;
  enclosure_id_t* ordered_ids = NULL;
  res_T res = RES_OK;
  res_T res2 = RES_OK;

  if(!scn) return RES_BAD_ARG;

  if(!scn->nsegs) goto exit;

  darray_segment_tmp_init(scn->dev->allocator, &segments_tmp);
  segments_tmp_initialized = 1;
  darray_frontier_vertex_init(scn->dev->allocator, &frontiers);
  frontiers_initialized = 1;
  htable_overlap_init(scn->dev->allocator, &overlaps);
  overlaps_initialized = 1;

  OK(darray_segment_tmp_resize(&segments_tmp, scn->nsegs));
  segsides
    = MEM_CALLOC(scn->dev->allocator, 2 * scn->nsegs, sizeof(struct segside));
  if(!segsides) {
    res = RES_MEM_ERR;
    goto error;
  }
#ifndef NDEBUG
  else {
    /* Initialise segsides to allow assert code */
    size_t i;
    FOR_EACH(i, 0, 2 * scn->nsegs)
      init_segside(scn->dev->allocator, segsides + i);
  }
#endif

  /* The rest of the analysis is multithreaded */
  ASSERT(scn->dev->nthreads > 0);
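  /* The parallel region below chains four steps, each implemented by one of
   * the static functions above:
   *   1) collect_and_link_neighbours: link segment sides around vertices;
   *   2) extract_connex_components: flood-fill sides into connex components;
   *   3) group_connex_components: cast rays to group components into
   *      enclosures;
   *   4) build_result: build per-enclosure side and vertex lists.
   * Synchronization between the steps relies on the barriers noted inline. */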
  #pragma omp parallel num_threads(scn->dev->nthreads)
  {
    /* Step 1: build neighbourhoods */
    collect_and_link_neighbours(scn, segsides, &segments_tmp, &frontiers,
      &overlaps, &res);
    /* No barrier at the end of step 1: data used in step 1 cannot be
     * released / data produced by step 1 cannot be used
     * until the next sync point */

    /* The first thread here allocates some data.
     * Barrier needed at the end to ensure the data is created before any
     * use. */
    #pragma omp single
    {
      res_T tmp_res = RES_OK;
      darray_ptr_component_descriptor_init(scn->dev->allocator,
        &connex_components);
      connex_components_initialized = 1;
      /* Just a hint, to limit contention */
      OK2(darray_ptr_component_descriptor_reserve(&connex_components,
        2 + 2 * scn->next_medium_idx));
      darray_segment_comp_init(scn->dev->allocator, &segments_comp);
      segments_comp_initialized = 1;
      OK2(darray_segment_comp_resize(&segments_comp, scn->nsegs));
tmp_error:
      if(tmp_res != RES_OK) res2 = tmp_res;
    }
    /* Implicit barrier here: constraints on step 1 data are now met */

    #pragma omp single
    {
      /* Save all the overlapping segments in a darray */
      res_T tmp_res = RES_OK;
      struct htable_overlap_iterator it, end;
      ASSERT(overlaps_initialized);
      htable_overlap_begin(&overlaps, &it);
      htable_overlap_end(&overlaps, &end);
      tmp_res = darray_seg_id_reserve(&scn->analyze.overlapping_ids,
        htable_overlap_size_get(&overlaps));
      if(tmp_res != RES_OK) goto tmp_error2;
      while(!htable_overlap_iterator_eq(&it, &end)) {
        tmp_res = darray_seg_id_push_back(&scn->analyze.overlapping_ids,
          htable_overlap_iterator_key_get(&it));
        if(tmp_res != RES_OK) goto tmp_error2;
        htable_overlap_iterator_next(&it);
      }
      qsort(darray_seg_id_data_get(&scn->analyze.overlapping_ids),
        darray_seg_id_size_get(&scn->analyze.overlapping_ids),
        sizeof(*darray_seg_id_cdata_get(&scn->analyze.overlapping_ids)),
        cmp_seg_id);
      htable_overlap_release(&overlaps);
      overlaps_initialized = 0;
tmp_error2:
      if(tmp_res != RES_OK) res2 = tmp_res;
    }

    if(darray_seg_id_size_get(&scn->analyze.overlapping_ids)) {
      /* Stop the analysis here as the model is ill-formed */
      goto end_;
    }

    if(res != RES_OK || res2 != RES_OK) {
      #pragma omp single nowait
      {
        if(res != RES_OK) {
          log_err(scn->dev,
            LIB_NAME":%s: could not build neighbourhoods from scene.\n",
            FUNC_NAME);
        } else {
          res = res2;
        }
      }
      goto error_;
    }

    /* Step 2: extract segment connex components */
    extract_connex_components(scn, segsides, &connex_components,
      &segments_tmp, &segments_comp, &s2d_view, &component_count, &res);
    /* No barrier at the end of step 2: data used in step 2 cannot be
     * released / data produced by step 2 cannot be used
     * until the next sync point */

    #pragma omp barrier
    /* Constraints on step 2 data are now met */

    if(res != RES_OK) {
      #pragma omp single nowait
      {
        log_err(scn->dev,
          LIB_NAME":%s: could not extract connex components from scene.\n",
          FUNC_NAME);
      } /* No barrier here */
      goto error_;
    }

    /* One thread releases some data before going to step 3,
     * the others go to step 3 without sync */
    #pragma omp single nowait
    {
      darray_segment_tmp_release(&segments_tmp);
      segments_tmp_initialized = 0;
    } /* No barrier here */

    /* Step 3: group components */
    group_connex_components(scn, &segments_comp, &connex_components, s2d_view,
      &next_enclosure_id, &res);
    /* Barrier at the end of step 3: data used in step 3 can be released /
     * data produced by step 3 can be used */

    if(res != RES_OK) {
      #pragma omp single nowait
      {
        log_err(scn->dev,
          LIB_NAME":%s: could not group connex components from scene.\n",
          FUNC_NAME);
      }
      goto error_;
    }

    /* One thread releases some data and allocates other data before going to
     * step 4, the others wait for the allocated data */
    #pragma omp single
    {
      if(s2d_view) S2D(scene_view_ref_put(s2d_view));
      s2d_view = NULL;
      ordered_ids = MEM_ALLOC(scn->dev->allocator,
        sizeof(*ordered_ids) * scn->analyze.enclosures_count);
      if(!ordered_ids) res = RES_MEM_ERR;
    } /* Implicit barrier here */
    if(res != RES_OK) goto error_;

    /* Step 4: build the result */
    build_result(scn, &connex_components, &segments_comp, &frontiers,
      ordered_ids, &res);
    /* No barrier at the end of step 4: data used in step 4 cannot be
     * released / data produced by step 4 cannot be used
     * until the next sync point */

    #pragma omp barrier
    /* Constraints on step 4 data are now met */

    if(res != RES_OK) {
      #pragma omp single nowait
      {
        log_err(scn->dev, LIB_NAME":%s: could not build result.\n", FUNC_NAME);
      }
      goto error_;
    }

    /* Some threads release data */
    #pragma omp sections nowait
    {
      #pragma omp section
      {
        MEM_RM(scn->dev->allocator, ordered_ids);
        custom_darray_ptr_component_descriptor_release(&connex_components);
        connex_components_initialized = 0;
      }
      #pragma omp section
      {
        darray_segment_comp_release(&segments_comp);
        segments_comp_initialized = 0;
      }
    } /* No barrier here */

end_:
error_:
    ;
  } /* Implicit barrier here */

  if(res != RES_OK) goto error;
exit:
  if(connex_components_initialized)
    custom_darray_ptr_component_descriptor_release(&connex_components);
  if(s2d_view) S2D(scene_view_ref_put(s2d_view));
  if(segments_tmp_initialized) darray_segment_tmp_release(&segments_tmp);
  if(segments_comp_initialized) darray_segment_comp_release(&segments_comp);
  if(frontiers_initialized) darray_frontier_vertex_release(&frontiers);
  if(overlaps_initialized) htable_overlap_release(&overlaps);
  if(segsides) MEM_RM(scn->dev->allocator, segsides);

  return res;
error:
  goto exit;
}