#ifndef __TBB_task_H
#define __TBB_task_H

#include "tbb_stddef.h"
#include "tbb_machine.h"

namespace tbb {

class task;
class task_list;

#if __TBB_EXCEPTIONS
class task_group_context;
#endif

//! Implementation details; not part of the public interface.
namespace internal {

    //! Interface to the task scheduler, used internally by class task.
    class scheduler: no_copy {
    public:
        //! For internal use only: schedule the list of tasks that starts at first.
        virtual void spawn( task& first, task*& next ) = 0;

        //! For internal use only: wait until the reference count of parent becomes one,
        //! executing child first if it is non-NULL.
        virtual void wait_for_all( task& parent, task* child ) = 0;

        //! For internal use only: spawn the list of root tasks that starts at first
        //! and wait for all of them to complete.
        virtual void spawn_root_and_wait( task& first, task*& next ) = 0;

        //! Pure virtual destructor; concrete schedulers are defined inside the library.
        virtual ~scheduler() = 0;
    };

    //! A reference count of tasks that are still expected to complete.
    typedef intptr reference_count;

    //! An id as used for specifying affinity.
    typedef unsigned short affinity_id;

#if __TBB_EXCEPTIONS
    //! Node of the doubly linked list used to track contexts registered with a master thread.
    struct context_list_node_t {
        context_list_node_t *my_prev,
                            *my_next;
    };

    //! Proxy returned by task::allocate_root( task_group_context& ): allocates a root task
    //! associated with the user-supplied context.
    class allocate_root_with_context_proxy: no_assign {
        task_group_context& my_context;
    public:
        allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {}
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };
#endif

    //! Proxy returned by task::allocate_root(): allocates a task with no parent.
    class allocate_root_proxy: no_assign {
    public:
        static task& __TBB_EXPORTED_FUNC allocate( size_t size );
        static void __TBB_EXPORTED_FUNC free( task& );
    };

    //! Proxy returned by task::allocate_continuation(): the new task takes over this task's parent.
    class allocate_continuation_proxy: no_assign {
    public:
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

    //! Proxy returned by task::allocate_child(): the new task becomes a child of this task.
    class allocate_child_proxy: no_assign {
    public:
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

    //! Proxy returned by task::allocate_additional_child_of( parent ): the new task becomes a child of parent.
    class allocate_additional_child_of_proxy: no_assign {
        task& self;
        task& parent;
    public:
        allocate_additional_child_of_proxy( task& self_, task& parent_ ) : self(self_), parent(parent_) {}
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

    class task_group_base;

    //! Memory prefix to a task object.
    /** This class is internal to the library.  Every task object is immediately
        preceded in memory by a task_prefix that holds the scheduler's bookkeeping
        for that task. */
    class task_prefix {
    private:
        friend class tbb::task;
        friend class tbb::task_list;
        friend class internal::scheduler;
        friend class internal::allocate_root_proxy;
        friend class internal::allocate_child_proxy;
        friend class internal::allocate_continuation_proxy;
        friend class internal::allocate_additional_child_of_proxy;
        friend class internal::task_group_base;

#if __TBB_EXCEPTIONS
        //! Shared context that is used to communicate asynchronous state changes
        //! (such as cancellation requests) to this task and its descendants.
        task_group_context *context;
#endif

        //! The scheduler that allocated the task.
        scheduler* origin;

        //! The scheduler that owns the task.
        scheduler* owner;

        //! The task whose reference count includes me; NULL for a root task.
        tbb::task* parent;

        //! Reference count used for synchronization.
        /** In the "blocking style" of programming, this is the number of children
            that have not yet completed, plus one if the task itself is waiting. */
        reference_count ref_count;

        //! Scheduling depth.
        int depth;

        //! A task::state_type, stored in a byte for compactness.
        unsigned char state;

        //! Miscellaneous state that is not directly visible to users.
        unsigned char extra_state;

        //! Hint as to where the task should run (see task::affinity_id).
        affinity_id affinity;

        //! "next" field for a list of tasks.
        tbb::task* next;

        //! The task corresponding to this task_prefix (the task immediately follows the prefix in memory).
        tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}
    };

} // namespace internal

#if __TBB_EXCEPTIONS

#if TBB_USE_CAPTURED_EXCEPTION
class tbb_exception;
#else
namespace internal {
class tbb_exception_ptr;
} // namespace internal
#endif

//! Used to form groups of tasks.
/** A task_group_context represents a group of tasks that can be cancelled (and,
    when exception support is enabled, have exceptions propagated) as a unit.
    Every task belongs to a group, either explicitly via task::allocate_root(ctx)
    or implicitly by inheriting the group of its parent.  A "bound" context is
    attached to the group of the task that created it, so cancellation of the
    parent group also cancels this one; an "isolated" context has no parent. */
class task_group_context : internal::no_copy
{
private:
#if TBB_USE_CAPTURED_EXCEPTION
    typedef tbb_exception exception_container_type;
#else
    typedef internal::tbb_exception_ptr exception_container_type;
#endif

    //! Layout of my_version_and_traits: version in the low 16 bits, traits above it.
    enum version_traits_word_layout {
        traits_offset = 16,
        version_mask = 0xFFFF,
        traits_mask = 0xFFFFul << traits_offset
    };

public:
    enum kind_type {
        isolated,
        bound
    };

    enum traits_type {
        exact_exception = 0x0001ul << traits_offset,
        no_cancellation = 0x0002ul << traits_offset,
        concurrent_wait = 0x0004ul << traits_offset,
#if TBB_USE_CAPTURED_EXCEPTION
        default_traits = 0
#else
        default_traits = exact_exception
#endif
    };

private:
    union {
        //! Flavor of this context: bound or isolated.
        kind_type my_kind;
        uintptr_t _my_kind_aligner;
    };

    //! Pointer to the context of the parent cancellation group; NULL for isolated contexts.
    task_group_context *my_parent;

    //! Node in the thread-specific list of contexts, used to propagate cancellation
    //! without additional memory allocation.
    internal::context_list_node_t my_node;

    //! Leading padding that protects accesses to frequently used members from false sharing.
    char _leading_padding[internal::NFS_MaxLineSize -
                    2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t)];

    //! Nonzero if cancellation of this group has been requested.
    uintptr_t my_cancellation_requested;

    //! Version of the context layout (low 16 bits) combined with traits_type flags (high bits).
    uintptr_t my_version_and_traits;

    //! Pointer to the container storing an exception being propagated across this group.
    exception_container_type *my_exception;

    //! Thread (scheduler instance) that registered this context in its list.
    void *my_owner;

    //! Trailing padding that protects accesses to frequently used members from false sharing.
    char _trailing_padding[internal::NFS_MaxLineSize - sizeof(intptr_t) - 2 * sizeof(void*)];

public:
    //! Default and binding constructor.
    /** By default the new context is bound to the context of the innermost task group
        of the creating thread. */
    task_group_context ( kind_type relation_with_parent = bound,
                         uintptr_t traits = default_traits )
        : my_kind(relation_with_parent)
        , my_version_and_traits(1 | traits)
    {
        init();
    }

    __TBB_EXPORTED_METHOD ~task_group_context ();

    //! Forcefully reinitializes the context after the task tree it was associated with completes.
    /** Must not be invoked concurrently with any other operation on this context. */
    void __TBB_EXPORTED_METHOD reset ();

    //! Initiates cancellation of all tasks in this group and its subordinate groups.
    /** Returns false if cancellation had already been requested, true otherwise. */
    bool __TBB_EXPORTED_METHOD cancel_group_execution ();

    //! Returns true if cancellation of this group has been requested.
    bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const;

    //! Records the pending exception and cancels the task group.
    /** May be called only from inside a catch block. */
    void __TBB_EXPORTED_METHOD register_pending_exception ();

protected:
    //! Out-of-line part of the constructor.
    void __TBB_EXPORTED_METHOD init ();

private:
    friend class task;
    friend class internal::allocate_root_with_context_proxy;

    static const kind_type binding_required = bound;
    static const kind_type binding_completed = kind_type(bound+1);

    //! Checks whether any ancestor has an outstanding cancellation request and,
    //! if so, propagates it to this context.
    void propagate_cancellation_from_ancestors ();

    //! For debugging purposes only.
    bool is_alive () {
#if TBB_USE_DEBUG
        return my_version_and_traits != 0xDeadBeef;
#else
        return true;
#endif
    }
};
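
// Illustrative usage sketch (not part of this header): running a task tree in its
// own cancellation group and cancelling it as a whole.  RootTask is a hypothetical
// user class derived from tbb::task.
//
//     tbb::task_group_context ctx( tbb::task_group_context::isolated );
//     tbb::task& r = *new( tbb::task::allocate_root(ctx) ) RootTask;
//     tbb::task::spawn_root_and_wait( r );
//
// While the tree runs, another thread may call ctx.cancel_group_execution();
// tasks in the group can poll tbb::task::self().is_cancelled() and finish early.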

#endif /* __TBB_EXCEPTIONS */

//! Base class for user-defined tasks.
class task: internal::no_copy {
    //! Set reference count; out-of-line version used when threading tools or assertions are enabled.
    void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );

    //! Decrement reference count and return its new value; out-of-line version.
    internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();

protected:
    //! Default constructor.
    task() {prefix().extra_state=1;}

public:
    //! Destructor.
    virtual ~task() {}

    //! Should be overridden by derived classes.
    /** The scheduler invokes execute() to run the task.  It may return NULL, or a
        pointer to the next task that the calling thread should run immediately
        (the "scheduler bypass" optimization). */
    virtual task* execute() = 0;

    //! Enumeration of task states that the scheduler considers.
    enum state_type {
        //! task is running, and will be destroyed after execute() completes.
        executing,
        //! task is to be rescheduled after execute() completes.
        reexecute,
        //! task is in the ready pool, or is in the process of being put there or taken off.
        ready,
        //! task object is freshly allocated or recycled.
        allocated,
        //! task object is on the free list, or is in the process of being put there or taken off.
        freed,
        //! task is to be recycled as a (safe) continuation.
        recycle
    };

    //------------------------------------------------------------------------
    // Allocation
    //------------------------------------------------------------------------

    //! Returns proxy for overloaded operator new that allocates a root task.
    static internal::allocate_root_proxy allocate_root() {
        return internal::allocate_root_proxy();
    }

#if __TBB_EXCEPTIONS
    //! Returns proxy for overloaded operator new that allocates a root task associated with the user-supplied context.
    static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {
        return internal::allocate_root_with_context_proxy(ctx);
    }
#endif

    //! Returns proxy for overloaded operator new that allocates a continuation task of *this.
    /** The continuation inherits the parent of *this. */
    internal::allocate_continuation_proxy& allocate_continuation() {
        return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
    }

    //! Returns proxy for overloaded operator new that allocates a child task of *this.
    internal::allocate_child_proxy& allocate_child() {
        return *reinterpret_cast<internal::allocate_child_proxy*>(this);
    }

    //! Like allocate_child, except that the new task's parent becomes t, not *this.
    /** Typically used to add children to a task that is already running. */
    internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) {
        return internal::allocate_additional_child_of_proxy(*this,t);
    }

    //! Destroy a task.
    /** Usually not needed, because a task is implicitly destroyed after its execute()
        returns.  Use it for tasks that are allocated but never run, such as a dummy
        root whose only purpose is to wait for its children. */
    void __TBB_EXPORTED_METHOD destroy( task& victim );

    //------------------------------------------------------------------------
    // Recycling of tasks
    //------------------------------------------------------------------------

    //! Change this to be a continuation of its former self.
    /** The caller must guarantee that the task's reference count does not become zero
        until after execute() returns, typically by having execute() return a pointer
        to a child.  If that guarantee cannot be made, use recycle_as_safe_continuation()
        instead. */
    void recycle_as_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = allocated;
    }

    //! Recommended, safe variant of recycle_as_continuation.
    /** The task is not destroyed or re-run until its reference count reaches zero, so the
        count must include one extra for the task itself (i.e. number of children plus one). */
    void recycle_as_safe_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = recycle;
    }

    //! Change this to be a child of new_parent.
    void recycle_as_child_of( task& new_parent ) {
        internal::task_prefix& p = prefix();
        __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" );
        __TBB_ASSERT( p.parent==NULL, "parent must be null" );
        __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" );
        __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" );
        p.state = allocated;
        p.parent = &new_parent;
#if __TBB_EXCEPTIONS
        p.context = new_parent.prefix().context;
#endif
    }

    //! Schedule this for reexecution after the current execute() returns.
    void recycle_to_reexecute() {
        __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" );
        prefix().state = reexecute;
    }

    // The depth-related methods below are obsolete: they do nothing and are retained
    // only for source compatibility with code written for earlier versions of the library.
    intptr_t depth() const {return 0;}
    void set_depth( intptr_t ) {}
    void add_to_depth( int ) {}

    //------------------------------------------------------------------------
    // Spawning and blocking
    //------------------------------------------------------------------------

    //! Set reference count.
    void set_ref_count( int count ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        internal_set_ref_count(count);
#else
        prefix().ref_count = count;
#endif
    }

    //! Atomically increment reference count.
    /** Has acquire semantics. */
    void increment_ref_count() {
        __TBB_FetchAndIncrementWacquire( &prefix().ref_count );
    }

    //! Atomically decrement reference count and return the resulting value.
    /** Has release semantics. */
    int decrement_ref_count() {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        return int(internal_decrement_ref_count());
#else
        return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;
#endif
    }

    //! Schedule child for execution.
    /** The caller must ensure that this task's reference count accounts for the child. */
    void spawn( task& child ) {
        prefix().owner->spawn( child, child.prefix().next );
    }

    //! Spawn multiple tasks and clear the list.
    void spawn( task_list& list );

    //! Similar to spawn(child) followed by wait_for_all(), but more efficient.
    void spawn_and_wait_for_all( task& child ) {
        prefix().owner->wait_for_all( *this, &child );
    }

    //! Similar to spawn(list) followed by wait_for_all(), but more efficient.
    void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );

    //! Spawn a root task and wait for it to complete.
    /** The task must have been allocated by allocate_root and must not have a parent. */
    static void spawn_root_and_wait( task& root ) {
        root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
    }

    //! Spawn all root tasks on the list and wait for all of them to complete.
    static void spawn_root_and_wait( task_list& root_list );

    //! Wait for this task's reference count to become one, executing other tasks while waiting.
    void wait_for_all() {
        prefix().owner->wait_for_all( *this, NULL );
    }

    //! The innermost task being executed or destroyed by the current thread.
    static task& __TBB_EXPORTED_FUNC self();

    //! Task on whose behalf this task is working, or NULL if this is a root.
    task* parent() const {return prefix().parent;}

#if __TBB_EXCEPTIONS
    //! Shared context that is used to communicate cancellation requests to this task and its descendants.
    task_group_context* context() {return prefix().context;}
#endif

    //! True if the task was stolen from the task pool of another thread.
    bool is_stolen_task() const {
        internal::task_prefix& p = prefix();
        internal::task_prefix& q = parent()->prefix();
        return p.owner!=q.owner;
    }

    //------------------------------------------------------------------------
    // Debugging
    //------------------------------------------------------------------------

    //! Current execution state.
    state_type state() const {return state_type(prefix().state);}

    //! The internal reference count.
    int ref_count() const {
#if TBB_USE_ASSERT
        internal::reference_count ref_count = prefix().ref_count;
        __TBB_ASSERT( ref_count==int(ref_count), "integer overflow error");
#endif
        return int(prefix().ref_count);
    }

    //! True if this task is owned by the calling thread; false otherwise.
    bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const;

    //------------------------------------------------------------------------
    // Affinity
    //------------------------------------------------------------------------

    //! An id as used for specifying affinity.
    /** A value of 0 means no affinity. */
    typedef internal::affinity_id affinity_id;

    //! Set affinity for this task.
    void set_affinity( affinity_id id ) {prefix().affinity = id;}

    //! Current affinity of this task.
    affinity_id affinity() const {return prefix().affinity;}

    //! Invoked by the scheduler to notify the task that it ran on an unexpected thread.
    /** Invoked before execute() runs, if the task was stolen or has affinity but will
        execute on another thread.  The default action does nothing. */
    virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );

#if __TBB_EXCEPTIONS
    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** Returns false if cancellation had already been requested, true otherwise. */
    bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }

    //! Returns true if this task's group received a cancellation request.
    bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
#endif

private:
    friend class task_list;
    friend class internal::scheduler;
    friend class internal::allocate_root_proxy;
#if __TBB_EXCEPTIONS
    friend class internal::allocate_root_with_context_proxy;
#endif
    friend class internal::allocate_continuation_proxy;
    friend class internal::allocate_child_proxy;
    friend class internal::allocate_additional_child_of_proxy;

    friend class internal::task_group_base;

    //! Get reference to the corresponding task_prefix, which immediately precedes this task in memory.
    internal::task_prefix& prefix( internal::version_tag* = NULL ) const {
        return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
    }
};
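
// Illustrative usage sketch (not part of this header): defining and running a task.
// HelloTask and its message are hypothetical names; a tbb::task_scheduler_init object
// is assumed to be available in this version of the library.
//
//     #include <cstdio>
//     #include "tbb/task.h"
//     #include "tbb/task_scheduler_init.h"
//
//     class HelloTask : public tbb::task {
//         const char* my_msg;
//     public:
//         HelloTask( const char* msg ) : my_msg(msg) {}
//         /*override*/ tbb::task* execute() {
//             std::printf( "%s\n", my_msg );
//             return NULL;                     // nothing for the scheduler to bypass to
//         }
//     };
//
//     int main() {
//         tbb::task_scheduler_init init;       // the scheduler must be initialized first
//         tbb::task& t = *new( tbb::task::allocate_root() ) HelloTask("hello");
//         tbb::task::spawn_root_and_wait( t ); // runs t and blocks until it completes
//         return 0;
//     }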

//! task that does nothing.  Useful for synchronization.
class empty_task: public task {
    task* execute() {
        return NULL;
    }
};
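
// Illustrative usage sketch (not part of this header): using empty_task as a dummy
// root so that ordinary (non-task) code can wait on a child task.  MyTask is
// hypothetical.
//
//     tbb::empty_task* dummy = new( tbb::task::allocate_root() ) tbb::empty_task;
//     dummy->set_ref_count( 2 );                       // one child + one for the wait
//     tbb::task& child = *new( dummy->allocate_child() ) MyTask;
//     dummy->spawn_and_wait_for_all( child );
//     dummy->destroy( *dummy );                        // dummy never executes, so free it explicitly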

//! A list of tasks.
/** Intended for holding tasks that are to be spawned or waited upon together;
    see task::spawn(task_list&) and task::spawn_and_wait_for_all(task_list&). */
class task_list: internal::no_copy {
private:
    task* first;
    task** next_ptr;
    friend class task;
public:
    //! Construct empty list.
    task_list() : first(NULL), next_ptr(&first) {}

    //! Destroys the list, but does not destroy the task objects.
    ~task_list() {}

    //! True if the list is empty; false otherwise.
    bool empty() const {return !first;}

    //! Push task onto back of list.
    void push_back( task& task ) {
        task.prefix().next = NULL;
        *next_ptr = &task;
        next_ptr = &task.prefix().next;
    }

    //! Pop the front task from the list.
    task& pop_front() {
        __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
        task* result = first;
        first = result->prefix().next;
        if( !first ) next_ptr = &first;
        return *result;
    }

    //! Clear the list (does not destroy the task objects).
    void clear() {
        first=NULL;
        next_ptr=&first;
    }
};
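
// Illustrative usage sketch (not part of this header): collecting children in a
// task_list and spawning them together from inside a parent task's execute().
// ChildTask and n are hypothetical.
//
//     tbb::task_list list;
//     for( int i = 0; i < n; ++i )
//         list.push_back( *new( allocate_child() ) ChildTask(i) );
//     set_ref_count( n + 1 );                // n children + 1 for the wait
//     spawn_and_wait_for_all( list );        // returns when all children are done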

inline void task::spawn( task_list& list ) {
    if( task* t = list.first ) {
        prefix().owner->spawn( *t, *list.next_ptr );
        list.clear();
    }
}

inline void task::spawn_root_and_wait( task_list& root_list ) {
    if( task* t = root_list.first ) {
        t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
        root_list.clear();
    }
}

} // namespace tbb

// These operator new overloads pair the placement-new syntax
//     new( <allocation proxy> ) MyTaskType(...)
// with the proxies returned by the task::allocate_* methods.  The matching
// operator delete overloads are invoked by the compiler only if a task's
// constructor throws during such a placement new; user code never calls them.
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {
    return &tbb::internal::allocate_root_proxy::allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {
    tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );
}

#if __TBB_EXCEPTIONS
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
#endif

inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

#endif /* __TBB_task_H */