Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
concurrent_vector.cpp
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 
16 
17 
18 
19 */
20 
21 #if (_MSC_VER)
22  //MSVC 10 "deprecated" the application of some std:: algorithms to raw pointers as unsafe.
23  //The reason is that the destination is not checked for bounds/having enough space.
24  #define _SCL_SECURE_NO_WARNINGS
25 #endif
26 
27 #include "tbb/concurrent_vector.h"
28 #include "tbb/cache_aligned_allocator.h"
29 #include "tbb/tbb_exception.h"
30 #include "tbb_misc.h"
31 #include "itt_notify.h"
32 
33 #include <cstring>
34 #include <memory> //for uninitialized_fill_n
35 
36 #if defined(_MSC_VER) && defined(_Wp64)
37  // Workaround for overzealous compiler warnings in /Wp64 mode
38  #pragma warning (disable: 4267)
39 #endif
40 
41 namespace tbb {
42 
43 namespace internal {
44 class concurrent_vector_base_v3::helper :no_assign {
45 public:
46  //! memory page size
47  static const size_type page_size = 4096;
48 
49  inline static bool incompact_predicate(size_type size) { // assert size != 0, see source/test/test_vector_layout.cpp
50  return size < page_size || ((size-1)%page_size < page_size/2 && size < page_size * 128); // for more details
51  }
52 
53  inline static size_type find_segment_end(const concurrent_vector_base_v3 &v) {
54  segment_t *s = v.my_segment;
55  segment_index_t u = s==v.my_storage? pointers_per_short_table : pointers_per_long_table;
56  segment_index_t k = 0;
57  while( k < u && (s[k].load<relaxed>()==segment_allocated() ))
58  ++k;
59  return k;
60  }
61 
62  // TODO: optimize accesses to my_first_block
63  //! assign first segment size. k is the index of the last segment to be allocated, not a count of segments
64  inline static void assign_first_segment_if_necessary(concurrent_vector_base_v3 &v, segment_index_t k) {
65  if( !v.my_first_block ) {
66  /* There was a suggestion to set first segment according to incompact_predicate:
67  while( k && !helper::incompact_predicate(segment_size( k ) * element_size) )
68  --k; // while previous vector size is compact, decrement
69  // reasons to not do it:
70  // * constructor(n) is not ready to accept fragmented segments
71  // * backward compatibility due to that constructor
72  // * current version gives additional guarantee and faster init.
73  // * two calls to reserve() will give the same effect.
74  */
75  v.my_first_block.compare_and_swap(k+1, 0); // store number of segments
76  }
77  }
78 
79  inline static void *allocate_segment(concurrent_vector_base_v3 &v, size_type n) {
80  void *ptr = v.vector_allocator_ptr(v, n);
81  if(!ptr) throw_exception(eid_bad_alloc); // check for bad allocation, throw exception
82  return ptr;
83  }
84 
85  //! Publish segment so other threads can see it.
86  template<typename argument_type>
87  inline static void publish_segment( segment_t& s, argument_type rhs ) {
88  // see also itt_store_pointer_with_release_v3()
89  ITT_NOTIFY(sync_releasing, &s);
90  s.store<release>(rhs);
91  }
92 
93  static size_type enable_segment(concurrent_vector_base_v3 &v, size_type k, size_type element_size, bool mark_as_not_used_on_failure = false);
94 
95  // TODO: rename as get_segments_table() and return segment pointer
96  inline static void extend_table_if_necessary(concurrent_vector_base_v3 &v, size_type k, size_type start) {
97  if(k >= pointers_per_short_table && v.my_segment == v.my_storage)
98  extend_segment_table(v, start );
99  }
100 
101  static void extend_segment_table(concurrent_vector_base_v3 &v, size_type start );
102 
103  struct segment_not_used_predicate: no_assign {
104  segment_t &s;
105  segment_not_used_predicate(segment_t &segment) : s(segment) {}
106  bool operator()() const { return s.load<relaxed>() == segment_not_used ();}
107  };
108  inline static segment_t& acquire_segment(concurrent_vector_base_v3 &v, size_type index, size_type element_size, bool owner) {
109  segment_t &s = v.my_segment[index]; // TODO: pass v.my_segment as argument
110  if( s.load<acquire>() == segment_not_used() ) { // do not check for segment_allocation_failed state
111  if( owner ) {
112  enable_segment( v, index, element_size );
113  } else {
114  ITT_NOTIFY(sync_prepare, &s);
115  spin_wait_while( segment_not_used_predicate(s) );
116  ITT_NOTIFY(sync_acquired, &s);
117  }
118  } else {
119  ITT_NOTIFY(sync_acquired, &s);
120  }
121  enforce_segment_allocated(s.load<relaxed>()); //it's hard to recover correctly after segment_allocation_failed state
122  return s;
123  }
124 
126  segment_t *table;// TODO: review all segment_index_t as just short type
127  size_type first_block, k, sz, start, finish, element_size;
128  helper(segment_t *segments, size_type fb, size_type esize, size_type index, size_type s, size_type f) throw()
129  : table(segments), first_block(fb), k(index), sz(0), start(s), finish(f), element_size(esize) {}
130  inline void first_segment() throw() {
131  __TBB_ASSERT( start <= finish, NULL );
132  __TBB_ASSERT( first_block || !finish, NULL );
133  if( k < first_block ) k = 0; // process solid segment at a time
134  size_type base = segment_base( k );
135  __TBB_ASSERT( base <= start, NULL );
136  finish -= base; start -= base; // rebase as offsets from segment k
137  sz = k ? base : segment_size( first_block ); // sz==base for k>0
138  }
139  inline void next_segment() throw() {
140  finish -= sz; start = 0; // offsets from next segment
141  if( !k ) k = first_block;
142  else { ++k; sz = segment_size( k ); }
143  }
144  template<typename F>
145  inline size_type apply(const F &func) {
146  first_segment();
147  while( sz < finish ) { // work for more than one segment
148  //TODO: remove extra load() of table[k] inside func
149  func( table[k], table[k].load<relaxed>().pointer<char>() + element_size*start, sz - start );
150  next_segment();
151  }
152  func( table[k], table[k].load<relaxed>().pointer<char>() + element_size*start, finish - start );
153  return k;
154  }
155  inline segment_value_t get_segment_value(size_type index, bool wait) {
156  segment_t &s = table[index];
157  if( wait && (s.load<acquire>() == segment_not_used()) ) {
158  ITT_NOTIFY(sync_prepare, &s);
159  spin_wait_while( segment_not_used_predicate(s) );
160  ITT_NOTIFY(sync_acquired, &s);
161  }
162  return s.load<relaxed>();
163  }
164  ~helper() throw() {
165  if( sz >= finish ) return; // the work is done correctly
166  cleanup();
167  }
168 
169  //! Out-of-line code to assist the destructor in infrequent cases.
170  void cleanup();
171 
172  //TODO: turn into lambda functions when available
173  struct init_body {
174  internal_array_op2 func;
175  const void *arg;
176  init_body(internal_array_op2 init, const void *src) : func(init), arg(src) {}
177  void operator()(segment_t &, void *begin, size_type n) const {
178  func( begin, arg, n );
179  }
180  };
181  struct safe_init_body {
182  internal_array_op2 func;
183  const void *arg;
184  safe_init_body(internal_array_op2 init, const void *src) : func(init), arg(src) {}
185  void operator()(segment_t &s, void *begin, size_type n) const {
186  enforce_segment_allocated(s.load<relaxed>());
187  func( begin, arg, n );
188  }
189  };
190  struct destroy_body {
191  internal_array_op1 func;
192  destroy_body(internal_array_op1 destroy) : func(destroy) {}
193  void operator()(segment_t &s, void *begin, size_type n) const {
194  if(s.load<relaxed>() == segment_allocated())
195  func( begin, n );
196  }
197  };
198 }; // class helper
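The helper class above relies on the power-of-two segment layout defined in tbb/concurrent_vector.h (segment_index_of, segment_base, segment_size): segment 0 holds two elements, and segment k > 0 holds 2^k elements starting at index 2^k. The following is a minimal standalone sketch of that indexing arithmetic, written only to illustrate what the helper assumes; it is not the library's implementation.

    #include <cassert>
    #include <cstddef>

    typedef std::size_t size_type;
    typedef std::size_t segment_index_t;

    // Index of the segment holding element i: 0,1 -> 0; 2,3 -> 1; 4..7 -> 2; 8..15 -> 3; ...
    segment_index_t segment_index_of( size_type i ) {
        segment_index_t k = 0;
        while( (size_type(2) << k) <= i )   // the first index of segment k+1 is 2^(k+1)
            ++k;
        return k;
    }

    // First element index covered by segment k: 0, 2, 4, 8, 16, ...
    size_type segment_base( segment_index_t k ) {
        return (size_type(1) << k) & ~size_type(1);
    }

    // Capacity of segment k: 2, 2, 4, 8, 16, ... (the embedded segment 0 holds two elements)
    size_type segment_size( segment_index_t k ) {
        return k ? size_type(1) << k : 2;
    }

    int main() {
        assert( segment_index_of(3) == 1 && segment_index_of(8) == 3 );
        assert( segment_base(3) == 8 && segment_size(3) == 8 );
        // Element i lives at offset i - segment_base(k) inside segment k = segment_index_of(i).
        return 0;
    }

This layout is why enable_segment below treats k == 0 specially (size_of_enabled_segment = 2) and why internal_assign uses "k ? segment_size(k) : 2" for the first segment's capacity.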
199 
200 void concurrent_vector_base_v3::helper::extend_segment_table(concurrent_vector_base_v3 &v, size_type start) {
201  if( start > segment_size(pointers_per_short_table) ) start = segment_size(pointers_per_short_table);
202  // If other threads are trying to set pointers in the short segment, wait for them to finish their
203  // assignments before we copy the short segment to the long segment. Note: grow_to_at_least depends on it
204  for( segment_index_t i = 0; segment_base(i) < start && v.my_segment == v.my_storage; i++ ){
205  if(v.my_storage[i].load<relaxed>() == segment_not_used()) {
206  ITT_NOTIFY(sync_prepare, &v.my_storage[i]);
207  atomic_backoff backoff(true);
208  while( v.my_segment == v.my_storage && (v.my_storage[i].load<relaxed>() == segment_not_used()) )
209  backoff.pause();
210  ITT_NOTIFY(sync_acquired, &v.my_storage[i]);
211  }
212  }
213  if( v.my_segment != v.my_storage ) return;
214 
215  segment_t* new_segment_table = (segment_t*)NFS_Allocate( pointers_per_long_table, sizeof(segment_t), NULL );
216  __TBB_ASSERT(new_segment_table, "NFS_Allocate should throw an exception if it cannot allocate the requested storage, not return a NULL pointer" );
217  std::uninitialized_fill_n(new_segment_table,size_t(pointers_per_long_table),segment_t()); //init newly allocated table
218  //TODO: replace with static assert
219  __TBB_STATIC_ASSERT(pointers_per_long_table >= pointers_per_short_table, "the long table must be at least as large as the short one, since we copy the short table's values into it" );
220  std::copy(v.my_storage, v.my_storage+pointers_per_short_table, new_segment_table);//copy values from old table, here operator= of segment_t is used
221  if( v.my_segment.compare_and_swap( new_segment_table, v.my_storage ) != v.my_storage )
222  NFS_Free( new_segment_table );
223  // else TODO: add ITT_NOTIFY signals for v.my_segment?
224 }
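Note how extend_segment_table publishes the long table: every thread that races here fills its own candidate table, exactly one compare_and_swap on my_segment wins, and the losers free their copy. Below is a reduced sketch of that publish-by-CAS pattern using std::atomic; the types and names are illustrative only (the real code uses TBB atomics plus NFS_Allocate/NFS_Free for cache-aligned storage, and 3 here stands in for pointers_per_short_table).

    #include <algorithm>
    #include <atomic>

    struct Table { void* slot[64]; };

    Table               g_short_table = Table();        // plays the role of v.my_storage
    std::atomic<Table*> g_table( &g_short_table );      // plays the role of v.my_segment

    void extend_table() {
        if( g_table.load(std::memory_order_acquire) != &g_short_table )
            return;                                      // someone else already extended it
        Table* candidate = new Table();                  // zero-initialized long table
        std::copy( g_short_table.slot, g_short_table.slot + 3,
                   candidate->slot );                    // carry over the short table's entries
        Table* expected = &g_short_table;
        // Exactly one thread publishes its candidate; every other thread frees its copy.
        if( !g_table.compare_exchange_strong( expected, candidate,
                                              std::memory_order_release,
                                              std::memory_order_relaxed ) )
            delete candidate;
    }

One subtlety the sketch omits is the wait loop at the top of extend_segment_table: a short-table slot may still be in flight, so the real code spins until each slot below `start` is either published or the table has already been replaced by another thread.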
225 
226 concurrent_vector_base_v3::size_type concurrent_vector_base_v3::helper::enable_segment( concurrent_vector_base_v3 &v, size_type k, size_type element_size,
227  bool mark_as_not_used_on_failure ) {
228 
229  struct segment_scope_guard : no_copy{
230  segment_t* my_segment_ptr;
231  bool my_mark_as_not_used;
232  segment_scope_guard(segment_t& segment, bool mark_as_not_used) : my_segment_ptr(&segment), my_mark_as_not_used(mark_as_not_used){}
233  void dismiss(){ my_segment_ptr = 0;}
234  ~segment_scope_guard(){
235  if (my_segment_ptr){
236  if (!my_mark_as_not_used){
237  publish_segment(*my_segment_ptr, segment_allocation_failed());
238  }else{
239  publish_segment(*my_segment_ptr, segment_not_used());
240  }
241  }
242  }
243  };
244 
245  segment_t* s = v.my_segment; // TODO: optimize out as argument? Optimize accesses to my_first_block
246  __TBB_ASSERT(s[k].load<relaxed>() != segment_allocated(), "concurrent operation during growth?");
247 
248  size_type size_of_enabled_segment = segment_size(k);
249  size_type size_to_allocate = size_of_enabled_segment;
250  if( !k ) {
251  assign_first_segment_if_necessary(v, default_initial_segments-1);
252  size_of_enabled_segment = 2 ;
253  size_to_allocate = segment_size(v.my_first_block);
254 
255  } else {
256  spin_wait_while_eq( v.my_first_block, segment_index_t(0) );
257  }
258 
259  if( k && (k < v.my_first_block)){ //no need to allocate anything
260  // s[0].array is changed only once ( 0 -> !0 ) and points to uninitialized memory
261  segment_value_t array0 = s[0].load<acquire>();
262  if(array0 == segment_not_used()){
263  // sync_prepare called only if there is a wait
264  ITT_NOTIFY(sync_prepare, &s[0]);
265  spin_wait_while( segment_not_used_predicate(s[0]) );
266  array0 = s[0].load<acquire>();
267  }
268  ITT_NOTIFY(sync_acquired, &s[0]);
269 
270  segment_scope_guard k_segment_guard(s[k], false);
271  enforce_segment_allocated(array0); // initial segment should be allocated
272  k_segment_guard.dismiss();
273 
274  publish_segment( s[k],
275  static_cast<void*>(array0.pointer<char>() + segment_base(k)*element_size )
276  );
277  } else {
278  segment_scope_guard k_segment_guard(s[k], mark_as_not_used_on_failure);
279  publish_segment(s[k], allocate_segment(v, size_to_allocate));
280  k_segment_guard.dismiss();
281  }
282  return size_of_enabled_segment;
283 }
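The segment_scope_guard above is a dismissable RAII guard: if allocating or publishing the segment throws, the destructor publishes segment_allocation_failed() (or segment_not_used()) so that threads spinning on the slot are released rather than left waiting forever; on success, dismiss() disarms it. A generic sketch of the same idiom, with illustrative names only:

    #include <cstdio>
    #include <functional>
    #include <stdexcept>

    // Runs its action when the scope unwinds, unless dismiss() was called first.
    class scope_guard {
        std::function<void()> on_exit_;
        bool armed_;
    public:
        explicit scope_guard(std::function<void()> f) : on_exit_(f), armed_(true) {}
        void dismiss() { armed_ = false; }
        ~scope_guard() { if( armed_ ) on_exit_(); }
    };

    void enable_slot_or_mark_failed() {
        scope_guard guard( []{ std::puts("publish failure marker so waiters can give up"); } );
        throw std::runtime_error("allocation failed");   // guard fires during stack unwinding
        // guard.dismiss();                              // would disarm the guard on success
    }

    int main() {
        try { enable_slot_or_mark_failed(); } catch( const std::exception& ) {}
        return 0;
    }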
284 
285 void concurrent_vector_base_v3::helper::cleanup() {
286  if( !sz ) { // allocation failed, restore the table
287  segment_index_t k_start = k, k_end = segment_index_of(finish-1);
288  if( segment_base( k_start ) < start )
289  get_segment_value(k_start++, true); // wait
290  if( k_start < first_block ) {
291  segment_value_t segment0 = get_segment_value(0, start>0); // wait if necessary
292  if((segment0 != segment_not_used()) && !k_start ) ++k_start;
293  if(segment0 != segment_allocated())
294  for(; k_start < first_block && k_start <= k_end; ++k_start )
295  publish_segment(table[k_start], segment_allocation_failed());
296  else for(; k_start < first_block && k_start <= k_end; ++k_start )
297  publish_segment(table[k_start], static_cast<void*>(
298  (segment0.pointer<char>()) + segment_base(k_start)*element_size) );
299  }
300  for(; k_start <= k_end; ++k_start ) // not in first block
301  if(table[k_start].load<acquire>() == segment_not_used())
302  publish_segment(table[k_start], segment_allocation_failed());
303  // fill allocated items
304  first_segment();
305  goto recover;
306  }
307  while( sz <= finish ) { // there is still work for at least one segment
308  next_segment();
309 recover:
310  segment_value_t array = table[k].load<relaxed>();
311  if(array == segment_allocated())
312  std::memset( (array.pointer<char>()) + element_size*start, 0, ((sz<finish?sz:finish) - start)*element_size );
313  else __TBB_ASSERT( array == segment_allocation_failed(), NULL );
314  }
315 }
316 
317 concurrent_vector_base_v3::~concurrent_vector_base_v3() {
318  segment_t* s = my_segment;
319  if( s != my_storage ) {
320 #if TBB_USE_ASSERT
321  //to satisfy the assert in the segment_t destructor
322  std::fill_n(my_storage,size_t(pointers_per_short_table),segment_t());
323 #endif /* TBB_USE_ASSERT */
324 #if TBB_USE_DEBUG
325  for( segment_index_t i = 0; i < pointers_per_long_table; i++)
326  __TBB_ASSERT( my_segment[i].load<relaxed>() != segment_allocated(), "Segment should have been freed. Please recompile with new TBB before using exceptions.");
327 #endif
328  my_segment = my_storage;
329  NFS_Free( s );
330  }
331 }
332 
333 concurrent_vector_base_v3::size_type concurrent_vector_base_v3::internal_capacity() const {
334  return segment_base( helper::find_segment_end(*this) );
335 }
336 
337 void concurrent_vector_base_v3::internal_throw_exception(size_type t) const {
338  exception_id ids[] = { eid_out_of_range, eid_segment_range_error, eid_index_range_error };
339  __TBB_ASSERT(t < sizeof(ids) / sizeof(exception_id), NULL);
340  throw_exception(ids[t]);
341 }
342 
343 void concurrent_vector_base_v3::internal_reserve( size_type n, size_type element_size, size_type max_size ) {
344  if( n>max_size )
345  throw_exception(eid_reservation_length_error);
346  __TBB_ASSERT( n, NULL );
347  helper::assign_first_segment_if_necessary(*this, segment_index_of(n-1));
348  segment_index_t k = helper::find_segment_end(*this);
349 
350  for( ; segment_base(k)<n; ++k ) {
351  helper::extend_table_if_necessary(*this, k, 0);
352  if(my_segment[k].load<relaxed>() != segment_allocated())
353  helper::enable_segment(*this, k, element_size, true ); //in case of failure mark segments as not used
354  }
355 }
356 
357 //TODO: Looks like atomic loads can be done relaxed here, as the only place this method is called from
358 //is the constructor, which does not require synchronization (for more details see comment in the
359 // concurrent_vector_base constructor).
360 void concurrent_vector_base_v3::internal_copy( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op2 copy ) {
361  size_type n = src.my_early_size;
362  __TBB_ASSERT( my_segment == my_storage, NULL );
363  if( n ) {
364  helper::assign_first_segment_if_necessary(*this, segment_index_of(n-1));
365  size_type b;
366  for( segment_index_t k=0; (b=segment_base(k))<n; ++k ) {
367  if( (src.my_segment.load<acquire>() == src.my_storage && k >= pointers_per_short_table)
368  || (src.my_segment[k].load<relaxed>() != segment_allocated())) {
369  my_early_size = b; break;
370  }
371  helper::extend_table_if_necessary(*this, k, 0);
372  size_type m = helper::enable_segment(*this, k, element_size);
373  if( m > n-b ) m = n-b;
374  my_early_size = b+m;
375  copy( my_segment[k].load<relaxed>().pointer<void>(), src.my_segment[k].load<relaxed>().pointer<void>(), m );
376  }
377  }
378 }
379 
380 void concurrent_vector_base_v3::internal_assign( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy ) {
381  size_type n = src.my_early_size;
382  while( my_early_size>n ) { // TODO: improve
383  segment_index_t k = segment_index_of( my_early_size-1 );
384  size_type b=segment_base(k);
385  size_type new_end = b>=n ? b : n;
386  __TBB_ASSERT( my_early_size>new_end, NULL );
387  enforce_segment_allocated(my_segment[k].load<relaxed>()); //if vector was broken before
388  // destructors are supposed to not throw any exceptions
389  destroy( my_segment[k].load<relaxed>().pointer<char>() + element_size*(new_end-b), my_early_size-new_end );
390  my_early_size = new_end;
391  }
392  size_type dst_initialized_size = my_early_size;
393  my_early_size = n;
395  size_type b;
396  for( segment_index_t k=0; (b=segment_base(k))<n; ++k ) {
397  if( (src.my_segment.load<acquire>() == src.my_storage && k >= pointers_per_short_table)
398  || src.my_segment[k].load<relaxed>() != segment_allocated() ) { // if source is damaged
399  my_early_size = b; break; // TODO: it may cause undestructed items
400  }
401  helper::extend_table_if_necessary(*this, k, 0);
402  if( my_segment[k].load<relaxed>() == segment_not_used())
403  helper::enable_segment(*this, k, element_size);
404  else
405  enforce_segment_allocated(my_segment[k].load<relaxed>());
406  size_type m = k? segment_size(k) : 2;
407  if( m > n-b ) m = n-b;
408  size_type a = 0;
409  if( dst_initialized_size>b ) {
410  a = dst_initialized_size-b;
411  if( a>m ) a = m;
412  assign( my_segment[k].load<relaxed>().pointer<void>(), src.my_segment[k].load<relaxed>().pointer<void>(), a );
413  m -= a;
414  a *= element_size;
415  }
416  if( m>0 )
417  copy( my_segment[k].load<relaxed>().pointer<char>() + a, src.my_segment[k].load<relaxed>().pointer<char>() + a, m );
418  }
419  __TBB_ASSERT( src.my_early_size==n, "detected use of concurrent_vector::operator= with right side that was concurrently modified" );
420 }
421 
422 void* concurrent_vector_base_v3::internal_push_back( size_type element_size, size_type& index ) {
423  __TBB_ASSERT( sizeof(my_early_size)==sizeof(uintptr_t), NULL );
424  size_type tmp = my_early_size.fetch_and_increment<acquire>();
425  index = tmp;
426  segment_index_t k_old = segment_index_of( tmp );
427  size_type base = segment_base(k_old);
428  helper::extend_table_if_necessary(*this, k_old, tmp);
429  segment_t& s = helper::acquire_segment(*this, k_old, element_size, base==tmp);
430  size_type j_begin = tmp-base;
431  return (void*)(s.load<relaxed>().pointer<char>() + element_size*j_begin);
432 }
433 
434 void concurrent_vector_base_v3::internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op2 init, const void *src ) {
435  internal_grow_to_at_least_with_result( new_size, element_size, init, src );
436 }
437 
438 concurrent_vector_base_v3::size_type concurrent_vector_base_v3::internal_grow_to_at_least_with_result( size_type new_size, size_type element_size, internal_array_op2 init, const void *src ) {
439  size_type e = my_early_size;
440  while( e<new_size ) {
441  size_type f = my_early_size.compare_and_swap(new_size, e);
442  if( f==e ) {
443  internal_grow( e, new_size, element_size, init, src );
444  break;
445  }
446  e = f;
447  }
448  // Check/wait until segment allocation completes
449  segment_index_t i, k_old = segment_index_of( new_size-1 );
450  if( k_old >= pointers_per_short_table && my_segment == my_storage ) {
451  helper::extend_segment_table(*this, my_early_size );
452  }
453  for( i = 0; i <= k_old; ++i ) {
454  segment_t &s = my_segment[i];
455  if(s.load<relaxed>() == segment_not_used()) {
456  ITT_NOTIFY(sync_prepare, &s);
457  atomic_backoff backoff(true);
458  while( my_segment[i].load<acquire>() == segment_not_used() ) // my_segment may change concurrently
459  backoff.pause();
460  ITT_NOTIFY(sync_acquired, &s);
461  }
462  enforce_segment_allocated(my_segment[i].load<relaxed>());
463  }
464 #if TBB_USE_DEBUG
465  size_type capacity = internal_capacity();
466  __TBB_ASSERT( capacity >= new_size, NULL);
467 #endif
468  return e;
469 }
470 
471 concurrent_vector_base_v3::size_type concurrent_vector_base_v3::internal_grow_by( size_type delta, size_type element_size, internal_array_op2 init, const void *src ) {
472  size_type result = my_early_size.fetch_and_add(delta);
473  internal_grow( result, result+delta, element_size, init, src );
474  return result;
475 }
476 
477 void concurrent_vector_base_v3::internal_grow( const size_type start, size_type finish, size_type element_size, internal_array_op2 init, const void *src ) {
478  __TBB_ASSERT( start<finish, "start must be less than finish" );
479  segment_index_t k_start = segment_index_of(start), k_end = segment_index_of(finish-1);
480  helper::assign_first_segment_if_necessary(*this, k_end);
481  helper::extend_table_if_necessary(*this, k_end, start);
482  helper range(my_segment, my_first_block, element_size, k_start, start, finish);
483  for(; k_end > k_start && k_end >= range.first_block; --k_end ) // allocate segments in reverse order
484  helper::acquire_segment(*this, k_end, element_size, true/*for k_end>k_start*/);
485  for(; k_start <= k_end; ++k_start ) // but allocate first block in straight order
486  helper::acquire_segment(*this, k_start, element_size, segment_base( k_start ) >= start );
487  range.apply( helper::init_body(init, src) );
488 }
489 
490 void concurrent_vector_base_v3::internal_resize( size_type n, size_type element_size, size_type max_size, const void *src,
491  internal_array_op1 destroy, internal_array_op2 init ) {
492  size_type j = my_early_size;
493  if( n > j ) { // construct items
494  internal_reserve(n, element_size, max_size);
495  my_early_size = n;
496  helper for_each(my_segment, my_first_block, element_size, segment_index_of(j), j, n);
497  for_each.apply( helper::safe_init_body(init, src) );
498  } else {
499  my_early_size = n;
500  helper for_each(my_segment, my_first_block, element_size, segment_index_of(n), n, j);
501  for_each.apply( helper::destroy_body(destroy) );
502  }
503 }
504 
505 concurrent_vector_base_v3::segment_index_t concurrent_vector_base_v3::internal_clear( internal_array_op1 destroy ) {
506  __TBB_ASSERT( my_segment, NULL );
507  size_type j = my_early_size;
508  my_early_size = 0;
509  helper for_each(my_segment, my_first_block, 0, 0, 0, j); // element_size is safe to be zero if 'start' is zero
510  j = for_each.apply( helper::destroy_body(destroy) );
511  size_type i = helper::find_segment_end(*this);
512  return j < i? i : j+1;
513 }
514 
515 void *concurrent_vector_base_v3::internal_compact( size_type element_size, void *table, internal_array_op1 destroy, internal_array_op2 copy )
516 {
517  const size_type my_size = my_early_size;
518  const segment_index_t k_end = helper::find_segment_end(*this); // allocated segments
519  const segment_index_t k_stop = my_size? segment_index_of(my_size-1) + 1 : 0; // number of segments to store existing items: 0=>0; 1,2=>1; 3,4=>2; [5-8]=>3;..
520  const segment_index_t first_block = my_first_block; // number of merged segments, getting values from atomics
521 
522  segment_index_t k = first_block;
523  if(k_stop < first_block)
524  k = k_stop;
525  else
526  while (k < k_stop && helper::incompact_predicate(segment_size( k ) * element_size) ) k++;
527  if(k_stop == k_end && k == first_block)
528  return NULL;
529 
530  segment_t *const segment_table = my_segment;
531  internal_segments_table &old = *static_cast<internal_segments_table*>( table );
532  //this call is left here for the sake of backward compatibility, and as a placeholder for table initialization
533  std::fill_n(old.table,sizeof(old.table)/sizeof(old.table[0]),segment_t());
534  old.first_block=0;
535 
536  if ( k != first_block && k ) // first segment optimization
537  {
538  // exception can occur here
539  void *seg = helper::allocate_segment(*this, segment_size(k));
540  old.table[0].store<relaxed>(seg);
541  old.first_block = k; // fill info for freeing new segment if exception occurs
542  // copy items to the new segment
543  size_type my_segment_size = segment_size( first_block );
544  for (segment_index_t i = 0, j = 0; i < k && j < my_size; j = my_segment_size) {
545  __TBB_ASSERT( segment_table[i].load<relaxed>() == segment_allocated(), NULL);
546  void *s = static_cast<void*>(
547  static_cast<char*>(seg) + segment_base(i)*element_size );
548  //TODO: refactor to use std::min
549  if(j + my_segment_size >= my_size) my_segment_size = my_size - j;
550  __TBB_TRY { // exception can occur here
551  copy( s, segment_table[i].load<relaxed>().pointer<void>(), my_segment_size );
552  } __TBB_CATCH(...) { // destroy all the already copied items
553  helper for_each(&old.table[0], old.first_block, element_size,
554  0, 0, segment_base(i)+ my_segment_size);
555  for_each.apply( helper::destroy_body(destroy) );
556  __TBB_RETHROW();
557  }
558  my_segment_size = i? segment_size( ++i ) : segment_size( i = first_block );
559  }
560  // commit the changes
561  std::copy(segment_table,segment_table + k,old.table);
562  for (segment_index_t i = 0; i < k; i++) {
563  segment_table[i].store<relaxed>(static_cast<void*>(
564  static_cast<char*>(seg) + segment_base(i)*element_size ));
565  }
566  old.first_block = first_block; my_first_block = k; // now, first_block != my_first_block
567  // destroy original copies
568  my_segment_size = segment_size( first_block ); // old.first_block actually
569  for (segment_index_t i = 0, j = 0; i < k && j < my_size; j = my_segment_size) {
570  if(j + my_segment_size >= my_size) my_segment_size = my_size - j;
571  // destructors are supposed to not throw any exceptions
572  destroy( old.table[i].load<relaxed>().pointer<void>(), my_segment_size );
573  my_segment_size = i? segment_size( ++i ) : segment_size( i = first_block );
574  }
575  }
576  // free unnecessary segments allocated by reserve() call
577  if ( k_stop < k_end ) {
578  old.first_block = first_block;
579  std::copy(segment_table+k_stop, segment_table+k_end, old.table+k_stop );
580  std::fill_n(segment_table+k_stop, (k_end-k_stop), segment_t());
581  if( !k ) my_first_block = 0;
582  }
583  return table;
584 }
585 
586 void concurrent_vector_base_v3::internal_swap(concurrent_vector_base_v3& v)
587 {
588  size_type my_sz = my_early_size.load<acquire>();
589  size_type v_sz = v.my_early_size.load<relaxed>();
590  if(!my_sz && !v_sz) return;
591 
592  bool my_was_short = (my_segment.load<relaxed>() == my_storage);
593  bool v_was_short = (v.my_segment.load<relaxed>() == v.my_storage);
594 
595  //In C++11, this would be: swap(my_storage, v.my_storage);
596  for (int i=0; i < pointers_per_short_table; ++i){
597  swap(my_storage[i], v.my_storage[i]);
598  }
599  tbb::internal::swap<relaxed>(my_first_block, v.my_first_block);
600  tbb::internal::swap<relaxed>(my_segment, v.my_segment);
601  if (my_was_short){
602  v.my_segment.store<relaxed>(v.my_storage);
603  }
604  if(v_was_short){
605  my_segment.store<relaxed>(my_storage);
606  }
607 
608  my_early_size.store<relaxed>(v_sz);
609  v.my_early_size.store<release>(my_sz);
610 }
611 
612 } // namespace internal
613 
614 } // tbb
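For context, these out-of-line entry points sit behind the public tbb::concurrent_vector<T> template declared in tbb/concurrent_vector.h: push_back funnels into internal_push_back, grow_by into internal_grow_by, grow_to_at_least into internal_grow_to_at_least_with_result, and shrink_to_fit into internal_compact. A small usage sketch of that public interface (assuming the TBB headers of this release; the parallel_for lambda requires C++11):

    #include "tbb/concurrent_vector.h"
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"
    #include <cstdio>

    int main() {
        tbb::concurrent_vector<int> v;

        // Concurrent appends: each push_back claims an index via internal_push_back,
        // enabling the segment that index falls into on demand.
        tbb::parallel_for( tbb::blocked_range<int>(0, 1000),
            [&]( const tbb::blocked_range<int>& r ) {
                for( int i = r.begin(); i != r.end(); ++i )
                    v.push_back(i);
            } );

        v.grow_by(10, -1);        // append 10 copies of -1 (internal_grow_by)
        v.grow_to_at_least(2000); // pad with value-initialized ints if needed

        // Not concurrency-safe: call only when no other thread touches the vector.
        v.shrink_to_fit();        // compacts the segment table via internal_compact

        std::printf("size=%lu capacity=%lu\n",
                    (unsigned long)v.size(), (unsigned long)v.capacity());
        return 0;
    }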