#if __TBB_TASK_GROUP_CONTEXT

// Helper used by captured_exception to deep-copy C strings.
inline char* duplicate_string ( const char* src ) {
    ...
    size_t len = strlen(src) + 1;
    strncpy (dst, src, len);   // dst points to a buffer of len bytes obtained from the TBB allocator
    ...
}

// captured_exception::set() stores private copies of the exception name and description.
    my_exception_name = duplicate_string( a_name );
    my_exception_info = duplicate_string( info );
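The copies made here are what user code sees when TBB has to transport an exception across threads by value rather than rethrowing the original object. A minimal usage sketch, assuming a build where TBB_USE_CAPTURED_EXCEPTION is in effect and an illustrative failing loop body; only name() and what() come from this class:

    #include <iostream>
    #include <stdexcept>
    #include "tbb/parallel_for.h"
    #include "tbb/tbb_exception.h"

    int main() {
        try {
            tbb::parallel_for( 0, 100, []( int i ) {
                if ( i == 42 )
                    throw std::runtime_error( "failed at 42" );   // hypothetical failure
            } );
        } catch ( const tbb::captured_exception& e ) {
            // name() is the RTTI name of the intercepted exception, what() the text
            // deep-copied by captured_exception::set() above.
            std::cerr << e.name() << ": " << e.what() << std::endl;
        } catch ( const std::exception& e ) {
            // With exact exception propagation the original std::runtime_error arrives instead.
            std::cerr << e.what() << std::endl;
        }
        return 0;
    }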
#if !TBB_USE_CAPTURED_EXCEPTION

// Placement-constructs a tbb_exception_ptr (wrapping an std::exception_ptr) in memory
// obtained from the TBB allocator.
tbb_exception_ptr* AllocateExceptionContainer( const T& src ) {
    ...
    new (eptr) tbb_exception_ptr(src);
    ...
}
// Both tbb_exception_ptr::allocate() overloads capture the exception currently in flight:
    return AllocateExceptionContainer( std::current_exception() );
    return AllocateExceptionContainer( std::current_exception() );
// tbb_exception_ptr::destroy() runs the destructor in place before releasing the storage:
    this->tbb_exception_ptr::~tbb_exception_ptr();
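tbb_exception_ptr is a thin container around std::exception_ptr, placement-constructed into allocator-provided storage so the captured exception can be rethrown on the thread that waits for the task tree. A self-contained sketch of the same capture-and-rethrow-later pattern in plain C++; names and the error text are illustrative only:

    #include <exception>
    #include <iostream>
    #include <stdexcept>
    #include <thread>

    int main() {
        std::exception_ptr eptr;
        std::thread worker( [&eptr] {
            try {
                throw std::runtime_error( "worker failed" );
            } catch ( ... ) {
                eptr = std::current_exception();   // the same capture point the container uses
            }
        } );
        worker.join();
        if ( eptr ) {
            try {
                std::rethrow_exception( eptr );    // re-raise on the "waiting" thread
            } catch ( const std::exception& e ) {
                std::cerr << e.what() << std::endl;
            }
        }
        return 0;
    }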
// ~task_group_context(): unlink this context from its owner scheduler's context list.
uintptr_t local_count_snapshot = my_owner->my_context_state_propagation_epoch;
...
// A state propagation may have run concurrently; fall back to the locked removal path.
if ( local_count_snapshot != the_context_state_propagation_epoch ) {
...
// Non-local removal brackets the unlinking with the owner's context list mutex.
my_owner->my_context_list_mutex.lock();
...
my_owner->my_context_list_mutex.unlock();
...
// Finally, destroy the platform-specific FPU settings stored in-place.
internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env)->~cpu_ctl_env();
// task_group_context::init(): the out-of-line part of the constructors.
my_name = internal::CUSTOM_CTX;
...
#if __TBB_TASK_PRIORITY
...
// Default-construct the FPU settings holder in the space reserved inside the context.
cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
new ( &ctl ) cpu_ctl_env;
// task_group_context::register_with(): link this context into the scheduler's
// thread-specific context list, coordinating with concurrent state propagation.
local_sched->my_local_ctx_list_update.store<relaxed>(1);
...
if ( local_sched->my_nonlocal_ctx_list_update.load<relaxed>() ) {
    // Another thread is touching the list, so splice my_node in under the list mutex.
    local_sched->my_context_list_head.my_next->my_prev = &my_node;
    ...
    local_sched->my_context_list_head.my_next = &my_node;
...
// task_group_context::bind_to(): a bound context adopts the context of the innermost
// task running in the binding thread as its parent.
my_parent = local_sched->my_innermost_running_task->prefix().context;
#if __TBB_TASK_PRIORITY
...
// If the propagation epoch changed while binding, a state propagation may be in flight,
// so re-read the parent's state under the global propagation mutex.
if ( local_count_snapshot != the_context_state_propagation_epoch ) {
    context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
    ...
#if __TBB_TASK_PRIORITY
    ...
#if __TBB_TASK_PRIORITY
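Whether a context is bound to a parent at all is decided at construction time; binding to the innermost running task's context, as shown above, happens lazily and only for bound contexts. A hedged sketch of the user-visible side, assuming the documented constructor and the parallel_for overload that accepts a context; the loop body is illustrative:

    #include "tbb/parallel_for.h"
    #include "tbb/task.h"   // tbb::task_group_context

    void nested_work() {
        // An isolated context never binds to the surrounding algorithm's context, so
        // cancelling the enclosing task group does not cancel this loop.
        tbb::task_group_context isolated_ctx( tbb::task_group_context::isolated );
        tbb::parallel_for( 0, 1000, []( int ) { /* independent work */ }, isolated_ctx );
    }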
// task_group_context::propagate_task_group_state() (templated over the state member):
if (this->*mptr_state == new_state) {
    // Already painted by an earlier invocation; nothing to scan.
}
else if ( this == &src ) {
    // The originating context itself is handled by the caller.
}
else {
    // Walk the ancestor chain; if it runs through src, repaint every context on the path.
    __TBB_ASSERT( internal::is_alive(ancestor->my_version_and_traits), "context tree was corrupted" );
    if ( ancestor == &src ) {
        ...
        ctx->*mptr_state = new_state;
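The mptr_state parameter is a pointer to a data member, which lets one template serve both the cancellation flag and the priority field. A small, self-contained illustration of that idiom; the types and names here are invented for the example, not taken from the scheduler:

    #include <cstdint>
    #include <iostream>

    struct Ctx {
        Ctx*      parent    = nullptr;
        uintptr_t cancelled = 0;
        intptr_t  priority  = 0;
    };

    // Walk up from 'leaf'; if 'src' is an ancestor, repaint the selected member on the path.
    template <typename T>
    void paint_up_to( Ctx* leaf, Ctx* src, T Ctx::*member, T new_value ) {
        for ( Ctx* a = leaf->parent; a; a = a->parent )
            if ( a == src ) {
                for ( Ctx* c = leaf; c != src; c = c->parent )
                    c->*member = new_value;
                return;
            }
    }

    int main() {
        Ctx root, mid, leaf;
        mid.parent  = &root;
        leaf.parent = &mid;
        root.cancelled = 1;                                      // pretend root was cancelled
        paint_up_to( &leaf, &root, &Ctx::cancelled, root.cancelled );
        std::cout << mid.cancelled << " " << leaf.cancelled << std::endl;   // prints "1 1"
        return 0;
    }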
// generic_scheduler::propagate_task_group_state(): apply the change to every context
// registered in this scheduler's thread-specific list.
template <typename T>
void generic_scheduler::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {
    ...
    while ( node != &my_context_list_head ) {
        // ctx is recovered from node via __TBB_get_object_ref (elided above).
        if ( ctx.*mptr_state != new_state )
            ctx.propagate_task_group_state( mptr_state, src, new_state );
        node = node->my_next;
        __TBB_ASSERT( is_alive(ctx.my_version_and_traits), "Local context list contains destroyed object" );
    }
// market::propagate_task_group_state(): fan the state change out to every scheduler.
template <typename T>
bool market::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {
    // The whole propagation runs under the global mutex so that concurrent changes at
    // different levels of the context tree stay consistent.
    context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
    if ( src.*mptr_state != new_state )
        // Another thread changed the state concurrently; back off.
    ...
    // Visit every worker scheduler, then every master scheduler.
    for ( unsigned i = 0; i < num_workers; ++i ) {
        s->propagate_task_group_state( mptr_state, src, new_state );
    ...
        it->propagate_task_group_state( mptr_state, src, new_state );
// task_group_context::cancel_group_execution(): only the first cancellation takes effect.
__TBB_ASSERT ( my_cancellation_requested == 0 || my_cancellation_requested == 1, "Invalid cancellation state" );
if ( my_cancellation_requested || as_atomic(my_cancellation_requested).compare_and_swap(1, 0) ) {
    // Already cancelled by another thread; nothing to propagate.
    ...
}
...
// task_group_context::is_group_execution_cancelled():
return my_cancellation_requested != 0;
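From user code this pair is the whole cooperative-cancellation protocol: one participant requests cancellation, everyone else polls. A hedged usage sketch; the bounds and loop body are illustrative, only the two context methods come from this file:

    #include "tbb/parallel_for.h"
    #include "tbb/task.h"

    int main() {
        tbb::task_group_context ctx;
        tbb::parallel_for( 0, 1000000, [&ctx]( int i ) {
            if ( i == 12345 )
                ctx.cancel_group_execution();        // the first caller wins and returns true
            if ( ctx.is_group_execution_cancelled() )
                return;                              // remaining iterations back off quickly
            /* ... real work ... */
        }, ctx );
        return 0;
    }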
// task_group_context::reset(): discard any stored exception and clear the cancellation flag.
if ( my_exception ) {
    my_exception->destroy();
    ...
}
my_cancellation_requested = 0;
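reset() exists so one context object can be reused across runs of a task tree; without it, a second run under a previously cancelled context would start out cancelled. A minimal sketch, assuming the parallel_for overload that accepts a context:

    #include "tbb/parallel_for.h"
    #include "tbb/task.h"

    void run_twice( tbb::task_group_context& ctx ) {
        tbb::parallel_for( 0, 1000, [&ctx]( int ) { /* may call ctx.cancel_group_execution() */ }, ctx );
        ctx.reset();   // clear the stored exception and cancellation flag once the tree is done
        tbb::parallel_for( 0, 1000, []( int ) { /* second run starts uncancelled */ }, ctx );
    }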
// task_group_context::capture_fp_settings(): record the calling thread's FPU state.
cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
if ( !(my_version_and_traits & fp_settings) ) {
    new ( &ctl ) cpu_ctl_env;
    my_version_and_traits |= fp_settings;
}
// task_group_context::copy_fp_settings(): clone the FPU settings of another context.
__TBB_ASSERT( !(my_version_and_traits & fp_settings), "The context already has FPU settings." );
__TBB_ASSERT( src.my_version_and_traits & fp_settings, "The source context does not have FPU settings." );
cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
cpu_ctl_env &src_ctl = *internal::punned_cast<cpu_ctl_env*>(&src.my_cpu_ctl_env);
new (&ctl) cpu_ctl_env( src_ctl );
my_version_and_traits |= fp_settings;
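Capturing or copying FPU settings lets worker threads execute an algorithm under the floating-point environment of the thread that configured the context. A hedged sketch, assuming capture_fp_settings() as documented and using std::fesetround() purely as an illustrative way of changing that environment first:

    #include <cfenv>
    #include "tbb/parallel_for.h"
    #include "tbb/task.h"

    void run_with_round_toward_zero() {
        std::fesetround( FE_TOWARDZERO );     // illustrative change to the caller's FPU state
        tbb::task_group_context ctx;
        ctx.capture_fp_settings();            // snapshot it into the context
        tbb::parallel_for( 0, 1000, []( int ) { /* FP work runs under the captured settings */ }, ctx );
    }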
// task_group_context::register_pending_exception(): capture the exception in flight
// unless the group is already cancelled.
if ( my_cancellation_requested )
    return;
#if TBB_USE_EXCEPTIONS
try {
    throw;
} TbbCatchAll( this );
#if __TBB_TASK_PRIORITY
// task_group_context::set_priority(): normalize the requested level, propagate it through
// the context tree, and update the arena the calling thread works in.
intptr_t p = normalize_priority(prio);
...
s->my_market->update_arena_priority( *s->my_arena, p );
...
// task_group_context::priority(): convert back from the normalized representation.
return static_cast<priority_t>(priority_from_normalized_rep[my_priority]);
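A hedged sketch of the deprecated priority interface shown above, assuming a TBB build with __TBB_TASK_PRIORITY so that priority_t and set_priority() are available:

    #include "tbb/parallel_for.h"
    #include "tbb/task.h"

    void run_background_work() {
        tbb::task_group_context ctx;
        ctx.set_priority( tbb::priority_low );   // deprecated; normalizes and propagates as above
        tbb::parallel_for( 0, 1000, []( int ) { /* low-priority background work */ }, ctx );
    }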
#define __TBB_FetchAndAddWrelease(P, V)
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
#define __TBB_STATIC_ASSERT(condition, msg)
#define __TBB_get_object_ref(class_name, member_name, member_addr)
Returns address of the object containing a member with the given name and address.
#define ITT_TASK_GROUP(type, name, parent)
#define ITT_STACK(precond, name, obj)
const size_t NFS_MaxLineSize
Compile-time constant that is an upper bound on the cache line/sector size.
void atomic_fence()
Sequentially consistent full memory fence.
@ full_fence
Sequential consistency.
void __TBB_EXPORTED_FUNC deallocate_via_handler_v3(void *p)
Deallocates memory using FreeHandler.
T __TBB_load_with_acquire(const volatile T &location)
void __TBB_store_relaxed(volatile T &location, V value)
void suppress_unused_warning(const T1 &)
Utility template function to prevent "unused" warnings by various compilers.
void spin_wait_until_eq(const volatile T &location, const U value)
Spin UNTIL the value of the variable is equal to a given value.
atomic< T > & as_atomic(T &t)
T __TBB_load_relaxed(const volatile T &location)
void *__TBB_EXPORTED_FUNC allocate_via_handler_v3(size_t n)
Allocates memory using MallocHandler.
void __TBB_store_with_release(volatile T &location, V value)
int space[sizeof(internal::uint64_t)/sizeof(int)]
context_list_node_t * my_next
context_list_node_t * my_prev
__TBB_atomic kind_type my_kind
Flavor of this context: bound or isolated.
exception_container_type * my_exception
Pointer to the container storing exception being propagated across this task group.
bool __TBB_EXPORTED_METHOD cancel_group_execution()
Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
bool __TBB_EXPORTED_METHOD is_group_execution_cancelled() const
Returns true if the context has received a cancellation request.
task_group_context * my_parent
Pointer to the context of the parent cancellation group. NULL for isolated contexts.
internal::string_index my_name
Description of the algorithm for scheduler-based instrumentation.
void propagate_task_group_state(T task_group_context::*mptr_state, task_group_context &src, T new_state)
Propagates any state change detected to *this, and as an optimisation possibly also upward along the ...
static const kind_type binding_completed
intptr_t my_priority
Priority level of the task group (in normalized representation)
void __TBB_EXPORTED_METHOD register_pending_exception()
Records the pending exception, and cancels the task group.
void register_with(internal::generic_scheduler *local_sched)
Registers this context with the local scheduler.
uintptr_t my_cancellation_requested
Specifies whether cancellation was requested for this task group.
static const kind_type detached
__TBB_DEPRECATED_IN_VERBOSE_MODE void set_priority(priority_t)
Changes priority of the task group.
void copy_fp_settings(const task_group_context &src)
Copies FPU control setting from another context.
task_group_context(kind_type relation_with_parent=bound, uintptr_t t=default_traits)
Default & binding constructor.
__itt_caller itt_caller
Used to set and maintain stack stitching point for Intel Performance Tools.
__TBB_DEPRECATED_IN_VERBOSE_MODE priority_t priority() const
Retrieves current priority of the current task group.
void __TBB_EXPORTED_METHOD init()
Out-of-line part of the constructor.
void bind_to(internal::generic_scheduler *local_sched)
Registers this context with the local scheduler and binds it to its parent context.
uintptr_t my_state
Internal state (combination of state flags, currently only may_have_children).
internal::context_list_node_t my_node
Used to form the thread specific list of contexts without additional memory allocation.
void __TBB_EXPORTED_METHOD capture_fp_settings()
Captures the current FPU control settings to the context.
__TBB_EXPORTED_METHOD ~task_group_context()
internal::cpu_ctl_env_space my_cpu_ctl_env
Space for platform-specific FPU settings.
static const kind_type dying
internal::generic_scheduler * my_owner
Scheduler instance that registered this context in its thread specific list.
void __TBB_EXPORTED_METHOD reset()
Forcefully reinitializes the context after the task tree it was associated with is completed.
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
static const kind_type binding_required
@ executing
task is running, and will be destroyed after method execute() completes.
const char * my_exception_name
void __TBB_EXPORTED_METHOD clear()
captured_exception()
Used only by method move().
__TBB_EXPORTED_METHOD ~captured_exception()
const char *__TBB_EXPORTED_METHOD name() const __TBB_override
Returns RTTI name of the originally intercepted exception.
const char *__TBB_EXPORTED_METHOD what() const __TBB_override
Returns the result of the originally intercepted exception's what() method.
captured_exception *__TBB_EXPORTED_METHOD move() __TBB_override
Creates and returns pointer to the deep copy of this exception object.
void __TBB_EXPORTED_METHOD set(const char *name, const char *info)
void __TBB_EXPORTED_METHOD destroy() __TBB_override
Destroys objects created by the move() method.
static captured_exception * allocate(const char *name, const char *info)
Functionally equivalent to {captured_exception e(name,info); return e.move();}.
const char * my_exception_info
void destroy()
Destroys this object.
tbb_exception_ptr(const std::exception_ptr &src)
static tbb_exception_ptr * allocate()
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
static generic_scheduler * local_scheduler_weak()
static generic_scheduler * local_scheduler_if_initialized()
iterator_impl< T > iterator
atomic< unsigned > my_first_unused_worker_idx
Index of the first unused worker.
friend class generic_scheduler
market * my_market
The market I am in.