task.h

/*
    Copyright 2005-2010 Intel Corporation.  All Rights Reserved.

    The source code contained or described herein and all documents related
    to the source code ("Material") are owned by Intel Corporation or its
    suppliers or licensors.  Title to the Material remains with Intel
    Corporation or its suppliers and licensors.  The Material is protected
    by worldwide copyright laws and treaty provisions.  No part of the
    Material may be used, copied, reproduced, modified, published, uploaded,
    posted, transmitted, distributed, or disclosed in any way without
    Intel's prior express written permission.

    No license under any patent, copyright, trade secret or other
    intellectual property right is granted to or conferred upon you by
    disclosure or delivery of the Materials, either expressly, by
    implication, inducement, estoppel or otherwise.  Any license under such
    intellectual property rights must be express and approved by Intel in
    writing.
*/

#ifndef __TBB_task_H
#define __TBB_task_H

#include "tbb_stddef.h"
#include "tbb_machine.h"

typedef struct ___itt_caller *__itt_caller;

namespace tbb {

class task;
class task_list;

#if __TBB_TASK_GROUP_CONTEXT
class task_group_context;
#endif /* __TBB_TASK_GROUP_CONTEXT */

// MSVC does not allow taking the address of a member that was defined
// privately in task_base and made public in class task via a using declaration.
#if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3)
#define __TBB_TASK_BASE_ACCESS public
#else
#define __TBB_TASK_BASE_ACCESS private
#endif

namespace internal {

    //! Memory-allocation proxy returned by task::allocate_additional_child_of.
    class allocate_additional_child_of_proxy: no_assign {
        //! No longer used, but retained for binary layout compatibility.  Always NULL.
        task* self;
        task& parent;
    public:
        explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {}
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

}

namespace interface5 {
    namespace internal {

        //! Base class for methods that became static members of class task.
        class task_base: tbb::internal::no_copy {
        __TBB_TASK_BASE_ACCESS:
            friend class tbb::task;

            //! Schedule task for execution when a worker becomes available.
            static void spawn( task& t );

            //! Spawn multiple tasks and clear list.
            static void spawn( task_list& list );

            //! Like allocate_child, except that the task's parent becomes "t", not this.
            /** Typically used in conjunction with schemes that add new children to a running task. */
            static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) {
                return tbb::internal::allocate_additional_child_of_proxy(t);
            }

            //! Destroy a task.
            /** Usually, calling this method is unnecessary, because a task is
                implicitly deleted after its execute() method runs.  However,
                sometimes a task needs to be explicitly deallocated, such as
                when a root task is used as the parent in spawn_and_wait_for_all. */
            static void __TBB_EXPORTED_FUNC destroy( task& victim );
        };
    } // internal
} // interface5

namespace internal {

    //! Work-stealing task scheduler (internal interface).
    class scheduler: no_copy {
    public:
        //! For internal use only.
        virtual void spawn( task& first, task*& next ) = 0;

        //! For internal use only.
        virtual void wait_for_all( task& parent, task* child ) = 0;

        //! For internal use only.
        virtual void spawn_root_and_wait( task& first, task*& next ) = 0;

        //! Pure virtual destructor;
        //  Have to have it just to shut up overzealous compilation warnings
        virtual ~scheduler() = 0;
#if __TBB_ARENA_PER_MASTER

        //! For internal use only.
        virtual void enqueue( task& t, void* reserved ) = 0;
#endif /* __TBB_ARENA_PER_MASTER */
    };

    //! A reference count.
    /** Should always be non-negative.  A signed type is used so that underflow can be detected. */
    typedef intptr_t reference_count;

    //! An id as used for specifying affinity.
    typedef unsigned short affinity_id;

#if __TBB_TASK_GROUP_CONTEXT
    //! Doubly linked list node used to chain task_group_context objects.
    struct context_list_node_t {
        context_list_node_t *my_prev,
                            *my_next;
    };

    //! Memory-allocation proxy returned by task::allocate_root( task_group_context& ).
    class allocate_root_with_context_proxy: no_assign {
        task_group_context& my_context;
    public:
        allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {}
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };
#endif /* __TBB_TASK_GROUP_CONTEXT */

    //! Memory-allocation proxy returned by task::allocate_root().
    class allocate_root_proxy: no_assign {
    public:
        static task& __TBB_EXPORTED_FUNC allocate( size_t size );
        static void __TBB_EXPORTED_FUNC free( task& );
    };

    //! Memory-allocation proxy returned by task::allocate_continuation().
    class allocate_continuation_proxy: no_assign {
    public:
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

    //! Memory-allocation proxy returned by task::allocate_child().
    class allocate_child_proxy: no_assign {
    public:
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };


    //! Memory prefix to a task object.
    /** This class is internal to the library.
        It is located immediately before the task object in memory. */
    class task_prefix {
    private:
        friend class tbb::task;
        friend class tbb::interface5::internal::task_base;
        friend class tbb::task_list;
        friend class internal::scheduler;
        friend class internal::allocate_root_proxy;
        friend class internal::allocate_child_proxy;
        friend class internal::allocate_continuation_proxy;
        friend class internal::allocate_additional_child_of_proxy;

#if __TBB_TASK_GROUP_CONTEXT
        //! Shared context that is used to communicate asynchronous state changes,
        //! such as cancellation requests and unhandled exceptions.
        task_group_context  *context;
#endif /* __TBB_TASK_GROUP_CONTEXT */

        //! The scheduler that allocated the task, or NULL if the task is big.
        scheduler* origin;

        //! The scheduler that owns the task.
        scheduler* owner;

        //! The task whose reference count includes this task.
        /** In the "blocking style" this is the parent task; in the
            "continuation-passing style" it is the continuation of the parent. */
        tbb::task* parent;

        //! Reference count used for synchronization.
        reference_count ref_count;

        //! Obsolete; retained only for backward binary compatibility.
        int depth;

        //! A task::state_type, stored as a byte for compactness.
        unsigned char state;

        //! Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
        unsigned char extra_state;

        affinity_id affinity;

        //! "next" field for list of task.
        tbb::task* next;

        //! The task corresponding to this task_prefix.
        tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}
    };

} // namespace internal

#if __TBB_TASK_GROUP_CONTEXT

#if TBB_USE_CAPTURED_EXCEPTION
    class tbb_exception;
#else
    namespace internal {
        class tbb_exception_ptr;
    }
#endif /* !TBB_USE_CAPTURED_EXCEPTION */

//! Used to form groups of tasks.
/** The context services explicit cancellation requests from user code, and
    unhandled exceptions intercepted during task execution.  Intercepting an
    exception results in generating an internal cancellation request. */
class task_group_context : internal::no_copy {
private:
#if TBB_USE_CAPTURED_EXCEPTION
    typedef tbb_exception exception_container_type;
#else
    typedef internal::tbb_exception_ptr exception_container_type;
#endif

    enum version_traits_word_layout {
        traits_offset = 16,
        version_mask = 0xFFFF,
        traits_mask = 0xFFFFul << traits_offset
    };

public:
    enum kind_type {
        isolated,
        bound
    };

    enum traits_type {
        exact_exception = 0x0001ul << traits_offset,
        concurrent_wait = 0x0004ul << traits_offset,
#if TBB_USE_CAPTURED_EXCEPTION
        default_traits = 0
#else
        default_traits = exact_exception
#endif /* !TBB_USE_CAPTURED_EXCEPTION */
    };

private:
    union {
        //! Flavor of this context: bound or isolated.
        kind_type my_kind;
        uintptr_t _my_kind_aligner;
    };

    //! Pointer to the context of the parent cancellation group.  NULL for isolated contexts.
    task_group_context *my_parent;

    //! Used to form the thread-specific list of contexts without additional memory allocation.
    internal::context_list_node_t my_node;

    //! Used to set and maintain stack stitching point for Intel Performance Tools.
    __itt_caller itt_caller;

    //! Leading padding protecting accesses to frequently used members from false sharing.
    char _leading_padding[internal::NFS_MaxLineSize -
                    2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t)
                          - sizeof(__itt_caller)];

    //! Specifies whether cancellation was requested for this task group.
    uintptr_t my_cancellation_requested;

    //! Version for run-time checks and behavioral traits of the context.
    /** Version occupies the low 16 bits, traits the upper 16 bits. */
    uintptr_t  my_version_and_traits;

    //! Pointer to the container storing the exception being propagated across this task group.
    exception_container_type *my_exception;

    //! Scheduler that registered this context in its thread-specific list.
    void *my_owner;

    //! Trailing padding protecting accesses to frequently used members from false sharing.
    char _trailing_padding[internal::NFS_MaxLineSize - sizeof(intptr_t) - 2 * sizeof(void*)];

public:
    //! Default & binding constructor.
    /** By default a bound context is created.  It will be bound (as a child) to the
        context of the task that calls task::allocate_root(this_context), so cancellation
        requests made on the parent group propagate into it. */
    task_group_context ( kind_type relation_with_parent = bound,
                         uintptr_t traits = default_traits )
        : my_kind(relation_with_parent)
        , my_version_and_traits(1 | traits)
    {
        init();
    }

    __TBB_EXPORTED_METHOD ~task_group_context ();

    //! Forcefully reinitializes the context after the task tree it was associated with is completed.
    /** The method assumes that all tasks that used to be associated with this
        context have already finished; calling it while the context is still in
        use somewhere in the task hierarchy leads to undefined behavior. */
    void __TBB_EXPORTED_METHOD reset ();

    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** Returns false if cancellation has already been requested, true otherwise. */
    bool __TBB_EXPORTED_METHOD cancel_group_execution ();

    //! Returns true if the context received a cancellation request.
    bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const;

    //! Records the pending exception, and cancels the task group.
    /** May be called only from inside a catch-block. */
    void __TBB_EXPORTED_METHOD register_pending_exception ();

protected:
    //! Out-of-line part of the constructor.
    /** Singled out to ensure backward binary compatibility of future versions. */
    void __TBB_EXPORTED_METHOD init ();

private:
    friend class task;
    friend class internal::allocate_root_with_context_proxy;

    static const kind_type binding_required = bound;
    static const kind_type binding_completed = kind_type(bound+1);
    static const kind_type detached = kind_type(binding_completed+1);
    static const kind_type dying = kind_type(detached+1);

    //! Checks if any of the ancestors has a cancellation request outstanding,
    //! and propagates it back to the descendants.
    void propagate_cancellation_from_ancestors ();

}; // class task_group_context

#endif /* __TBB_TASK_GROUP_CONTEXT */


//! Base class for user-defined tasks.
class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base {

    //! Set reference count.
    void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );

    //! Decrement reference count and return its new value.
    internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();

protected:
    //! Default constructor.
    task() {prefix().extra_state=1;}

public:
    //! Destructor.
    virtual ~task() {}

    //! Should be overridden by derived classes.
    virtual task* execute() = 0;

    //! Enumeration of task states that the scheduler considers.
    enum state_type {
        //! task is running, and will be destroyed after method execute() completes.
        executing,
        //! task to be rescheduled.
        reexecute,
        //! task is in ready pool, or is going to be put there, or was just taken off.
        ready,
        //! task object is freshly allocated or recycled.
        allocated,
        //! task object is on free list, or is going to be put there, or was just taken off.
        freed,
        //! task to be recycled as continuation.
        recycle
    };

    //------------------------------------------------------------------------
    // Allocating tasks
    //------------------------------------------------------------------------

    //! Returns proxy for overloaded new that allocates a root task.
    static internal::allocate_root_proxy allocate_root() {
        return internal::allocate_root_proxy();
    }

#if __TBB_TASK_GROUP_CONTEXT
    //! Returns proxy for overloaded new that allocates a root task associated with a user-supplied context.
    static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {
        return internal::allocate_root_with_context_proxy(ctx);
    }
#endif /* __TBB_TASK_GROUP_CONTEXT */

    //! Returns proxy for overloaded new that allocates a continuation task of *this.
    /** The continuation's parent becomes the parent of *this. */
    internal::allocate_continuation_proxy& allocate_continuation() {
        return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
    }

    //! Returns proxy for overloaded new that allocates a child task of *this.
    internal::allocate_child_proxy& allocate_child() {
        return *reinterpret_cast<internal::allocate_child_proxy*>(this);
    }

    //! Define recommended static form via import from base class.
    using task_base::allocate_additional_child_of;

#if __TBB_DEPRECATED_TASK_INTERFACE
    //! Destroy a task.
    /** Usually, calling this method is unnecessary, because a task is
        implicitly deleted after its execute() method runs.  However,
        sometimes a task needs to be explicitly deallocated, such as
        when a root task is used as the parent in spawn_and_wait_for_all. */
    void __TBB_EXPORTED_METHOD destroy( task& t );
#else /* !__TBB_DEPRECATED_TASK_INTERFACE */
    //! Define recommended static form via import from base class.
    using task_base::destroy;
#endif /* !__TBB_DEPRECATED_TASK_INTERFACE */

    //------------------------------------------------------------------------
    // Recycling of tasks
    //------------------------------------------------------------------------

    //! Change this to be a continuation of its former self.
    /** The caller must guarantee that the task's refcount does not become zero
        until after method execute() returns, typically by having execute()
        return a pointer to a child of the task.  If the guarantee cannot be
        made, use method recycle_as_safe_continuation instead. */
    void recycle_as_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = allocated;
    }

    //! Recommended to use, safe variant of recycle_as_continuation.
    /** For safety, it requires an additional increment of ref_count. */
    void recycle_as_safe_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = recycle;
    }

    //! Change this to be a child of new_parent.
    void recycle_as_child_of( task& new_parent ) {
        internal::task_prefix& p = prefix();
        __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" );
        __TBB_ASSERT( p.parent==NULL, "parent must be null" );
        __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" );
        __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" );
        p.state = allocated;
        p.parent = &new_parent;
#if __TBB_TASK_GROUP_CONTEXT
        p.context = new_parent.prefix().context;
#endif /* __TBB_TASK_GROUP_CONTEXT */
    }

    //! Schedule this for reexecution after current execute() returns.
    /** Made obsolete by recycle_as_safe_continuation; may become deprecated. */
    void recycle_to_reexecute() {
        __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" );
        prefix().state = reexecute;
    }

    // All depth-related methods are obsolete, and are retained for the sake
    // of backward source compatibility only
    intptr_t depth() const {return 0;}
    void set_depth( intptr_t ) {}
    void add_to_depth( int ) {}


    //------------------------------------------------------------------------
    // Spawning and blocking
    //------------------------------------------------------------------------

    //! Set reference count.
    void set_ref_count( int count ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        internal_set_ref_count(count);
#else
        prefix().ref_count = count;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
    }

    //! Atomically increment reference count.
    /** Has acquire semantics. */
    void increment_ref_count() {
        __TBB_FetchAndIncrementWacquire( &prefix().ref_count );
    }

    //! Atomically decrement reference count and return its new value.
    /** Has release semantics. */
    int decrement_ref_count() {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        return int(internal_decrement_ref_count());
#else
        return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
    }

    //! Define recommended static forms via import from base class.
    using task_base::spawn;

    //! Similar to spawn followed by wait_for_all, but more efficient.
    void spawn_and_wait_for_all( task& child ) {
        prefix().owner->wait_for_all( *this, &child );
    }

    //! Similar to spawn followed by wait_for_all, but more efficient.
    void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );

    //! Spawn task allocated by allocate_root, wait for it to complete, and deallocate it.
    static void spawn_root_and_wait( task& root ) {
        root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
    }

    //! Spawn root tasks on list and wait for all of them to finish.
    static void spawn_root_and_wait( task_list& root_list );

    //! Wait for reference count to become one, and set reference count to zero.
    /** Works on tasks while waiting. */
    void wait_for_all() {
        prefix().owner->wait_for_all( *this, NULL );
    }

#if __TBB_ARENA_PER_MASTER
    //! Enqueue task for starvation-resistant execution.
    static void enqueue( task& t ) {
        t.prefix().owner->enqueue( t, NULL );
    }

#endif /* __TBB_ARENA_PER_MASTER */
    //! The innermost task being executed or destroyed by the current thread at the moment.
    static task& __TBB_EXPORTED_FUNC self();

    //! task on whose behalf this task is working, or NULL if this is a root.
    task* parent() const {return prefix().parent;}

#if __TBB_TASK_GROUP_CONTEXT
    //! The task group context of this task.
    task_group_context* context() {return prefix().context;}
#endif /* __TBB_TASK_GROUP_CONTEXT */

    //! True if the task was stolen.
    bool is_stolen_task() const {
        return (prefix().extra_state & 0x80)!=0;
    }

    //------------------------------------------------------------------------
    // Debugging
    //------------------------------------------------------------------------

    //! Current execution state.
    state_type state() const {return state_type(prefix().state);}

    //! The internal reference count.
    int ref_count() const {
#if TBB_USE_ASSERT
        internal::reference_count ref_count_ = prefix().ref_count;
        __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error");
#endif
        return int(prefix().ref_count);
    }

    //! Obsolete, and only retained for the sake of backward compatibility.  Always returns true.
    bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const;

    //------------------------------------------------------------------------
    // Affinity
    //------------------------------------------------------------------------

    //! An id as used for specifying affinity.
    /** Guaranteed to be an integral type.  Value of 0 means no affinity. */
    typedef internal::affinity_id affinity_id;

    //! Set affinity for this task.
    void set_affinity( affinity_id id ) {prefix().affinity = id;}

    //! Current affinity of this task.
    affinity_id affinity() const {return prefix().affinity;}

    //! Invoked by scheduler to notify task that it ran on an unexpected thread.
    /** Invoked before method execute() runs, when the task is stolen or has
        affinity to a thread other than the one that executes it.
        The default action does nothing. */
    virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );

#if __TBB_TASK_GROUP_CONTEXT
    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** Returns false if cancellation has already been requested, true otherwise. */
    bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }

    //! Returns true if the context has received a cancellation request.
    bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
#endif /* __TBB_TASK_GROUP_CONTEXT */

private:
    friend class interface5::internal::task_base;
    friend class task_list;
    friend class internal::scheduler;
    friend class internal::allocate_root_proxy;
#if __TBB_TASK_GROUP_CONTEXT
    friend class internal::allocate_root_with_context_proxy;
#endif /* __TBB_TASK_GROUP_CONTEXT */
    friend class internal::allocate_continuation_proxy;
    friend class internal::allocate_child_proxy;
    friend class internal::allocate_additional_child_of_proxy;

    //! Get reference to corresponding task_prefix.
    internal::task_prefix& prefix( internal::version_tag* = NULL ) const {
        return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
    }
}; // class task

//! task that does nothing.  Useful for synchronization.
class empty_task: public task {
    /*override*/ task* execute() {
        return NULL;
    }
};

//! A list of tasks.
/** Used with the overloads of spawn and spawn_root_and_wait that take a task_list. */
class task_list: internal::no_copy {
private:
    task* first;
    task** next_ptr;
    friend class task;
    friend class interface5::internal::task_base;
public:
    //! Construct empty list.
    task_list() : first(NULL), next_ptr(&first) {}

    //! Destroys the list, but does not destroy the task objects.
    ~task_list() {}

    //! True if list is empty; false otherwise.
    bool empty() const {return !first;}

    //! Push task onto back of list.
    void push_back( task& task ) {
        task.prefix().next = NULL;
        *next_ptr = &task;
        next_ptr = &task.prefix().next;
    }

    //! Pop the front task from the list.
    task& pop_front() {
        __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
        task* result = first;
        first = result->prefix().next;
        if( !first ) next_ptr = &first;
        return *result;
    }

    //! Clear the list.
    void clear() {
        first=NULL;
        next_ptr=&first;
    }
};

inline void interface5::internal::task_base::spawn( task& t ) {
    t.prefix().owner->spawn( t, t.prefix().next );
}

inline void interface5::internal::task_base::spawn( task_list& list ) {
    if( task* t = list.first ) {
        t->prefix().owner->spawn( *t, *list.next_ptr );
        list.clear();
    }
}

inline void task::spawn_root_and_wait( task_list& root_list ) {
    if( task* t = root_list.first ) {
        t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
        root_list.clear();
    }
}

} // namespace tbb

inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {
    return &tbb::internal::allocate_root_proxy::allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {
    tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );
}

#if __TBB_TASK_GROUP_CONTEXT
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
#endif /* __TBB_TASK_GROUP_CONTEXT */

inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

#endif /* __TBB_task_H */
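
As a quick orientation for readers of this header, here is a minimal usage sketch of the "blocking style" it supports: derive from tbb::task, override execute(), allocate children with allocate_child(), set the reference count to the number of children plus one for the wait, then block in spawn_and_wait_for_all(). The names FibTask and parallel_fib, and the include path "tbb/task.h", are illustrative assumptions and are not part of the header itself.

    #include "tbb/task.h"

    // Illustrative sketch: recursive Fibonacci in the blocking style.
    class FibTask: public tbb::task {
    public:
        const long n;        // input value
        long* const sum;     // where to store the result
        FibTask( long n_, long* sum_ ) : n(n_), sum(sum_) {}
        /*override*/ tbb::task* execute() {
            if( n < 2 ) {
                *sum = n;
            } else {
                long x, y;
                FibTask& a = *new( allocate_child() ) FibTask(n-1,&x);
                FibTask& b = *new( allocate_child() ) FibTask(n-2,&y);
                // 2 children + 1 for the implicit wait in spawn_and_wait_for_all.
                set_ref_count(3);
                spawn( b );
                spawn_and_wait_for_all( a );
                *sum = x+y;
            }
            return NULL;
        }
    };

    long parallel_fib( long n ) {
        long sum;
        FibTask& root = *new( tbb::task::allocate_root() ) FibTask(n,&sum);
        // Runs the root task to completion and deallocates it.
        tbb::task::spawn_root_and_wait(root);
        return sum;
    }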
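
A second sketch, under the same assumptions, shows the continuation-passing style that allocate_continuation() and the recycling methods are designed for: execute() hands its children to a separately allocated continuation task and returns immediately instead of blocking. FibContinuation and FibTaskCPS are hypothetical names used only for this example.

    // Illustrative sketch: the same computation in continuation-passing style.
    class FibContinuation: public tbb::task {
    public:
        long* const sum;
        long x, y;           // filled in by the two child tasks
        FibContinuation( long* sum_ ) : sum(sum_) {}
        /*override*/ tbb::task* execute() {
            *sum = x+y;
            return NULL;
        }
    };

    class FibTaskCPS: public tbb::task {
    public:
        const long n;
        long* const sum;
        FibTaskCPS( long n_, long* sum_ ) : n(n_), sum(sum_) {}
        /*override*/ tbb::task* execute() {
            if( n < 2 ) {
                *sum = n;
                return NULL;
            }
            // The continuation becomes the new parent of the two children.
            FibContinuation& c = *new( allocate_continuation() ) FibContinuation(sum);
            FibTaskCPS& a = *new( c.allocate_child() ) FibTaskCPS(n-1,&c.x);
            FibTaskCPS& b = *new( c.allocate_child() ) FibTaskCPS(n-2,&c.y);
            // No extra +1 here: nothing blocks waiting on the continuation.
            c.set_ref_count(2);
            spawn( b );
            // Returning a task tells the scheduler to run it next.
            return &a;
        }
    };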
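
Where __TBB_TASK_GROUP_CONTEXT is enabled, a root task can be bound to an explicit task_group_context so the whole task tree can be cancelled. A minimal sketch, reusing the hypothetical FibTask from the first example:

    // Illustrative sketch: running a task tree under an explicit context.
    long cancellable_fib( long n ) {
        long sum = 0;
        tbb::task_group_context ctx;
        FibTask& root = *new( tbb::task::allocate_root(ctx) ) FibTask(n,&sum);
        tbb::task::spawn_root_and_wait(root);
        // While the tree is running, another thread holding a reference to ctx
        // may call ctx.cancel_group_execution(); running tasks can poll
        // tbb::task::self().is_cancelled() and return early.
        return sum;
    }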
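
Finally, task_list lets several root tasks be spawned and waited for as a batch; spawn_root_and_wait(task_list&) consumes the list. Another small sketch with the same hypothetical FibTask:

    // Illustrative sketch: spawning a batch of root tasks with task_list.
    void run_batch() {
        static long results[4];
        tbb::task_list list;
        for( int i=0; i<4; ++i )
            list.push_back( *new( tbb::task::allocate_root() ) FibTask(20+i,&results[i]) );
        // Waits for all roots to finish and leaves the list cleared.
        tbb::task::spawn_root_and_wait(list);
    }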
