Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
task.cpp
Go to the documentation of this file.
1 /*
2  Copyright (c) 2005-2019 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 
16 
17 
18 
19 */
20 
21 // Do not include task.h directly. Use scheduler_common.h instead
22 #include "scheduler_common.h"
23 #include "governor.h"
24 #include "scheduler.h"
25 #include "itt_notify.h"
26 
28 #include "tbb/partitioner.h"
29 
30 #include <new>
31 
32 namespace tbb {
33 
34 namespace internal {
35 
36 //------------------------------------------------------------------------
37 // Methods of allocate_root_proxy
38 //------------------------------------------------------------------------
40  internal::generic_scheduler* v = governor::local_scheduler_weak();
41  __TBB_ASSERT( v, "thread did not activate a task_scheduler_init object?" );
42 #if __TBB_TASK_GROUP_CONTEXT
43  task_prefix& p = v->my_innermost_running_task->prefix();
44 
45  ITT_STACK_CREATE(p.context->itt_caller);
46 #endif
47  // New root task becomes part of the currently running task's cancellation context
48  return v->allocate_task( size, __TBB_CONTEXT_ARG(NULL, p.context) );
49 }
50 
52  internal::generic_scheduler* v = governor::local_scheduler_weak();
53  __TBB_ASSERT( v, "thread does not have initialized task_scheduler_init object?" );
54 #if __TBB_TASK_GROUP_CONTEXT
55  // No need to do anything here as long as there is no context -> task connection
56 #endif /* __TBB_TASK_GROUP_CONTEXT */
57  v->free_task<local_task>( task );
58 }
59 
60 #if __TBB_TASK_GROUP_CONTEXT
61 //------------------------------------------------------------------------
62 // Methods of allocate_root_with_context_proxy
63 //------------------------------------------------------------------------
65  internal::generic_scheduler* s = governor::local_scheduler_weak();
66  __TBB_ASSERT( s, "Scheduler auto-initialization failed?" );
67  __TBB_ASSERT( &my_context, "allocate_root(context) argument is a dereferenced NULL pointer" );
68  task& t = s->allocate_task( size, NULL, &my_context );
69  // Supported usage model prohibits concurrent initial binding. Thus we do not
70  // need interlocked operations or fences to manipulate my_context.my_kind
72  // If we are in the outermost task dispatch loop of a master thread, then
73  // there is nothing to bind this context to, and we skip the binding part
74  // treating the context as isolated.
75  if ( s->master_outermost_level() )
77  else
78  my_context.bind_to( s );
79  }
80 #if __TBB_FP_CONTEXT
83  my_context.copy_fp_settings( *s->default_context() );
84 #endif
86  return t;
87 }
88 
90  internal::generic_scheduler* v = governor::local_scheduler_weak();
91  __TBB_ASSERT( v, "thread does not have initialized task_scheduler_init object?" );
92  // No need to do anything here as long as unbinding is performed by context destructor only.
93  v->free_task<local_task>( task );
94 }
95 #endif /* __TBB_TASK_GROUP_CONTEXT */
96 
97 //------------------------------------------------------------------------
98 // Methods of allocate_continuation_proxy
99 //------------------------------------------------------------------------
101  task* t = (task*)this;
104  task* parent = t->parent();
105  t->prefix().parent = NULL;
106  return s->allocate_task( size, __TBB_CONTEXT_ARG(parent, t->prefix().context) );
107 }
108 
110  // Restore the parent as it was before the corresponding allocate was called.
111  ((task*)this)->prefix().parent = mytask.parent();
113 }
114 
115 //------------------------------------------------------------------------
116 // Methods of allocate_child_proxy
117 //------------------------------------------------------------------------
119  task* t = (task*)this;
122  return s->allocate_task( size, __TBB_CONTEXT_ARG(t, t->prefix().context) );
123 }
124 
125 void allocate_child_proxy::free( task& mytask ) const {
127 }
128 
129 //------------------------------------------------------------------------
130 // Methods of allocate_additional_child_of_proxy
131 //------------------------------------------------------------------------
135  return s->allocate_task( size, __TBB_CONTEXT_ARG(&parent, parent.prefix().context) );
136 }
137 
139  // Undo the increment. We do not check the result of the fetch-and-decrement.
140  // We could consider spawning the task if the fetch-and-decrement returns 1.
141  // But we do not know that was the programmer's intention.
142  // Furthermore, if it was the programmer's intention, the program has a fundamental
143  // race condition (that we warn about in Reference manual), because the
144  // reference count might have become zero before the corresponding call to
145  // allocate_additional_child_of_proxy::allocate.
148 }
149 
150 //------------------------------------------------------------------------
151 // Support for auto_partitioner
152 //------------------------------------------------------------------------
154  const size_t X_FACTOR = 4;
155  return X_FACTOR * governor::local_scheduler()->max_threads_in_arena();
156 }
157 
158 //------------------------------------------------------------------------
159 // Methods of affinity_partitioner_base_v3
160 //------------------------------------------------------------------------
// Resize my_array to hold factor * (current arena concurrency) affinity ids;
// factor == 0 releases the array entirely. Any previously recorded affinities
// are discarded and the new array is zero-filled.
// NOTE(review): no synchronization is visible here — presumably callers
// serialize access to the partitioner; confirm before concurrent use.
161 void affinity_partitioner_base_v3::resize( unsigned factor ) {
162  // Check factor to avoid asking for number of workers while there might be no arena.
163  size_t new_size = factor ? factor*governor::local_scheduler()->max_threads_in_arena() : 0;
164  if( new_size!=my_size ) {
165  if( my_array ) {
166  NFS_Free( my_array );
167  // Following two assignments must be done here for sake of exception safety.
// If NFS_Allocate below throws, the object is left in a consistent empty
// state (no dangling pointer, size 0) rather than pointing at freed memory.
168  my_array = NULL;
169  my_size = 0;
170  }
171  if( new_size ) {
172  my_array = static_cast<affinity_id*>(NFS_Allocate(new_size,sizeof(affinity_id), NULL ));
// Zero-fill: presumably affinity_id 0 means "no affinity recorded yet" —
// TODO confirm against the scheduler's use of my_array.
173  memset( my_array, 0, sizeof(affinity_id)*new_size );
174  my_size = new_size;
175  }
176  }
177 }
178 
179 } // namespace internal
180 
181 using namespace tbb::internal;
182 
183 //------------------------------------------------------------------------
184 // task
185 //------------------------------------------------------------------------
186 
188  __TBB_ASSERT( count>=0, "count must not be negative" );
189  task_prefix &p = prefix();
190  __TBB_ASSERT(p.ref_count==1 && p.state==allocated && self().parent()==this
191  || !(p.extra_state & es_ref_count_active), "ref_count race detected");
192  ITT_NOTIFY(sync_releasing, &p.ref_count);
193  p.ref_count = count;
194 }
195 
197  ITT_NOTIFY( sync_releasing, &prefix().ref_count );
199  __TBB_ASSERT( k>=1, "task's reference count underflowed" );
200  if( k==1 )
201  ITT_NOTIFY( sync_acquired, &prefix().ref_count );
202  return k-1;
203 }
204 
209  return *v->my_innermost_running_task;
210 }
211 
213  return true;
214 }
215 
217  // 1 may be a guard reference for wait_for_all, which was not reset because
218  // of concurrent_wait mode or because prepared root task was not actually used
219  // for spawning tasks (as in structured_task_group).
220  __TBB_ASSERT( (intptr_t)victim.prefix().ref_count <= 1, "Task being destroyed must not have children" );
221  __TBB_ASSERT( victim.state()==task::allocated, "illegal state for victim task" );
222  task* parent = victim.parent();
223  victim.~task();
224  if( parent ) {
225  __TBB_ASSERT( parent->state()!=task::freed && parent->state()!=task::ready,
226  "attempt to destroy child of running or corrupted parent?" );
227  // 'reexecute' and 'executing' are also signs of a race condition, since most tasks
228  // set their ref_count upon entry but "es_ref_count_active" should detect this
229  parent->internal_decrement_ref_count();
230  // Even if the last reference to *parent is removed, it should not be spawned (documented behavior).
231  }
233 }
234 
237  task* t = list.first;
238  if( t ) {
239  if( &t->prefix().next!=list.next_ptr )
240  s->local_spawn( t->prefix().next, *list.next_ptr );
241  list.clear();
242  }
243  s->local_wait_for_all( *this, t );
244 }
245 
250 }
251 
252 #if __TBB_TASK_GROUP_CONTEXT
254  prefix().context = &ctx;
255  internal::generic_scheduler* s = governor::local_scheduler_weak();
257  // If we are in the outermost task dispatch loop of a master thread, then
258  // there is nothing to bind this context to, and we skip the binding part
259  // treating the context as isolated.
260  if ( s->master_outermost_level() )
262  else
263  ctx.bind_to( s );
264  }
265 #if __TBB_FP_CONTEXT
268  ctx.copy_fp_settings( *s->default_context() );
269 #endif
271 }
272 #endif /* __TBB_TASK_GROUP_CONTEXT */
273 
274 } // namespace tbb
275 
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
void __TBB_EXPORTED_METHOD free(task &) const
Definition: task.cpp:138
unsigned max_threads_in_arena()
Returns the concurrency limit of the current arena.
Definition: scheduler.h:599
#define __TBB_CONTEXT_ARG(arg1, context)
void clear()
Clear the list.
Definition: task.h:1034
Memory prefix to a task object.
Definition: task.h:188
unsigned short affinity_id
An id as used for specifying affinity.
Definition: task.h:124
void __TBB_EXPORTED_METHOD internal_set_ref_count(int count)
Set reference count.
Definition: task.cpp:187
static generic_scheduler * local_scheduler()
Obtain the thread-local instance of the TBB scheduler.
Definition: governor.h:126
Set if ref_count might be changed by another thread. Used for debugging.
static void __TBB_EXPORTED_FUNC destroy(task &victim)
Destroy a task.
Definition: task.cpp:216
#define __TBB_FetchAndDecrementWrelease(P)
Definition: tbb_machine.h:315
Disable caching for a small task.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:169
Task is known to have been allocated by this scheduler.
task object is freshly allocated or recycled.
Definition: task.h:620
void __TBB_EXPORTED_METHOD free(task &) const
Definition: task.cpp:125
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
Definition: task.cpp:118
task ** next_ptr
Definition: task.h:993
Used to form groups of tasks.
Definition: task.h:335
internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count()
Decrement reference count and return its new value.
Definition: task.cpp:196
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t new_size
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
Definition: task.cpp:132
static void __TBB_EXPORTED_FUNC free(task &)
Definition: task.cpp:51
A list of children.
Definition: task.h:990
__itt_caller itt_caller
Used to set and maintain stack stitching point for Intel Performance Tools.
Definition: task.h:395
Base class for user-defined tasks.
Definition: task.h:592
Work stealing task scheduler.
Definition: scheduler.h:124
task * my_innermost_running_task
Innermost task whose task::execute() is running. A dummy task on the outermost level.
Definition: scheduler.h:81
static task &__TBB_EXPORTED_FUNC self()
The innermost task being executed or destroyed by the current thread at the moment.
Definition: task.cpp:205
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t count
static const kind_type binding_required
Definition: task.h:566
void bind_to(internal::generic_scheduler *local_sched)
Registers this context with the local scheduler and binds it to its parent context.
void const char const char int ITT_FORMAT __itt_group_sync p
task * first
Definition: task.h:992
void spawn_and_wait_for_all(task &child)
Similar to spawn followed by wait_for_all, but more efficient.
Definition: task.h:773
internal::affinity_id affinity_id
An id as used for specifying affinity.
Definition: task.h:879
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
Definition: task.h:830
void __TBB_store_relaxed(volatile T &location, V value)
Definition: tbb_machine.h:743
void __TBB_EXPORTED_METHOD free(task &) const
Definition: task.cpp:109
void __TBB_EXPORTED_METHOD free(task &) const
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task * task
The graph class.
__TBB_atomic kind_type my_kind
Flavor of this context: bound or isolated.
Definition: task.h:382
void __TBB_EXPORTED_METHOD resize(unsigned factor)
Resize my_array.
Definition: task.cpp:161
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id parent
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p sync_releasing
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
Definition: task.cpp:100
task is in ready pool, or is going to be put there, or was just taken off.
Definition: task.h:618
task object is on free list, or is going to be put there, or was just taken off.
Definition: task.h:622
static task &__TBB_EXPORTED_FUNC allocate(size_t size)
Definition: task.cpp:39
virtual ~task()
Destructor.
Definition: task.h:606
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
Definition: task.h:423
void increment_ref_count()
Atomically increment reference count.
Definition: task.h:744
#define ITT_NOTIFY(name, obj)
Definition: itt_notify.h:120
size_t __TBB_EXPORTED_FUNC get_initial_auto_partitioner_divisor()
Definition: task.cpp:153
bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const
Obsolete, and only retained for the sake of backward compatibility. Always returns true.
Definition: task.cpp:212
void __TBB_EXPORTED_METHOD change_group(task_group_context &ctx)
Moves this task from its current group into another one.
void const char const char int ITT_FORMAT __itt_group_sync s
affinity_id * my_array
Array that remembers affinities of tree positions to affinity_id.
Definition: partitioner.h:86
static generic_scheduler * local_scheduler_weak()
Definition: governor.h:131
T __TBB_load_relaxed(const volatile T &location)
Definition: tbb_machine.h:739
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t size
void copy_fp_settings(const task_group_context &src)
Copies FPU control setting from another context.
state_type state() const
Current execution state.
Definition: task.h:859
intptr_t reference_count
A reference count.
Definition: task.h:121
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
Definition: task.h:941
size_t my_size
Number of elements in my_array.
Definition: partitioner.h:88
void assert_task_valid(const task *)
Identifiers declared inside namespace internal should never be used directly by client code.
Definition: atomic.h:55
void free_task(task &t)
Put task on free list.
Definition: scheduler.h:652
virtual void __TBB_EXPORTED_METHOD note_affinity(affinity_id id)
Invoked by scheduler to notify task that it ran on unexpected thread.
Definition: task.cpp:249
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
#define ITT_STACK_CREATE(obj)
Definition: itt_notify.h:125

Copyright © 2005-2019 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.