enumerable_thread_specific.h

00001 /*
00002     Copyright 2005-2009 Intel Corporation.  All Rights Reserved.
00003 
00004     The source code contained or described herein and all documents related
00005     to the source code ("Material") are owned by Intel Corporation or its
00006     suppliers or licensors.  Title to the Material remains with Intel
00007     Corporation or its suppliers and licensors.  The Material is protected
00008     by worldwide copyright laws and treaty provisions.  No part of the
00009     Material may be used, copied, reproduced, modified, published, uploaded,
00010     posted, transmitted, distributed, or disclosed in any way without
00011     Intel's prior express written permission.
00012 
00013     No license under any patent, copyright, trade secret or other
00014     intellectual property right is granted to or conferred upon you by
00015     disclosure or delivery of the Materials, either expressly, by
00016     implication, inducement, estoppel or otherwise.  Any license under such
00017     intellectual property rights must be express and approved by Intel in
00018     writing.
00019 */
00020 
00021 #ifndef __TBB_enumerable_thread_specific_H
00022 #define __TBB_enumerable_thread_specific_H
00023 
00024 #include "concurrent_vector.h"
00025 #include "tbb_thread.h"
00026 #include "concurrent_hash_map.h"
00027 #include "cache_aligned_allocator.h"
00028 #if __SUNPRO_CC
00029 #include <string.h>  // for memcpy
00030 #endif
00031 
00032 #if _WIN32||_WIN64
00033 #include <windows.h>
00034 #else
00035 #include <pthread.h>
00036 #endif
00037 
00038 namespace tbb {
00039 
//! Policy: allocate one native TLS key per container instance, or use no native TLS at all.
00041     enum ets_key_usage_type { ets_key_per_instance, ets_no_key };
00042 
00044     namespace internal {
00045         
//! Random-access iterator over the padded elements of an ETS internal vector.
/** Keeps a lazily-filled cache of the current element's value pointer; the
    cache is dropped whenever the position changes. */
template< typename Container, typename Value >
class enumerable_thread_specific_iterator
#if defined(_WIN64) && defined(_MSC_VER)
    // Ensure that Microsoft's internal template function _Val_type works correctly.
    : public std::iterator<std::random_access_iterator_tag,Value>
#endif /* defined(_WIN64) && defined(_MSC_VER) */
{
    Container *my_container;                 // the traversed container (not owned)
    typename Container::size_type my_index;  // current position
    mutable Value *my_value;                 // cached &element.value; NULL when stale

    template<typename C, typename T>
    friend enumerable_thread_specific_iterator<C,T> operator+( ptrdiff_t offset,
                                                               const enumerable_thread_specific_iterator<C,T>& v );

    template<typename C, typename T, typename U>
    friend bool operator==( const enumerable_thread_specific_iterator<C,T>& i,
                            const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend bool operator<( const enumerable_thread_specific_iterator<C,T>& i,
                           const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend ptrdiff_t operator-( const enumerable_thread_specific_iterator<C,T>& i, const enumerable_thread_specific_iterator<C,U>& j );

    template<typename C, typename U>
    friend class enumerable_thread_specific_iterator;

    public:

    // STL support
    typedef ptrdiff_t difference_type;
    typedef Value value_type;
    typedef Value* pointer;
    typedef Value& reference;
    typedef std::random_access_iterator_tag iterator_category;

    //! Iterator over container, positioned at index.
    enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) :
        my_container(&const_cast<Container &>(container)), my_index(index), my_value(NULL) {}

    //! Default-constructed (singular) iterator.
    enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {}

    //! Converting copy constructor, e.g. iterator -> const_iterator.
    template<typename U>
    enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator<Container, U>& other ) :
            my_container( other.my_container ), my_index( other.my_index), my_value( const_cast<Value *>(other.my_value) ) {}

    enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const {
        return enumerable_thread_specific_iterator( *my_container, my_index + offset );
    }

    enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const {
        return enumerable_thread_specific_iterator( *my_container, my_index - offset );
    }

    enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) {
        my_index += offset;
        my_value = NULL;   // position changed: invalidate the cache
        return *this;
    }

    enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) {
        my_index -= offset;
        my_value = NULL;
        return *this;
    }

    Value& operator*() const {
        if( !my_value )
            my_value = &(*my_container)[my_index].value;   // fill cache on first use
        __TBB_ASSERT( my_value==&(*my_container)[my_index].value, "corrupt cache" );
        return *my_value;
    }

    Value& operator[]( ptrdiff_t k ) const {
        return (*my_container)[my_index + k].value;
    }

    Value* operator->() const { return &operator*(); }

    enumerable_thread_specific_iterator& operator++() {
        ++my_index;
        my_value = NULL;
        return *this;
    }

    enumerable_thread_specific_iterator& operator--() {
        --my_index;
        my_value = NULL;
        return *this;
    }

    //! Post-increment: returns the pre-advance position.
    enumerable_thread_specific_iterator operator++(int) {
        enumerable_thread_specific_iterator old( *this );
        operator++();
        return old;
    }

    //! Post-decrement: returns the pre-retreat position.
    enumerable_thread_specific_iterator operator--(int) {
        enumerable_thread_specific_iterator old( *this );
        operator--();
        return old;
    }
};
00160         
00161         template<typename Container, typename T>
00162         enumerable_thread_specific_iterator<Container,T> operator+( ptrdiff_t offset, 
00163                                                                     const enumerable_thread_specific_iterator<Container,T>& v ) {
00164             return enumerable_thread_specific_iterator<Container,T>( v.my_container, v.my_index + offset );
00165         }
00166         
00167         template<typename Container, typename T, typename U>
00168         bool operator==( const enumerable_thread_specific_iterator<Container,T>& i, 
00169                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00170             return i.my_index==j.my_index && i.my_container == j.my_container;
00171         }
00172         
00173         template<typename Container, typename T, typename U>
00174         bool operator!=( const enumerable_thread_specific_iterator<Container,T>& i, 
00175                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00176             return !(i==j);
00177         }
00178         
00179         template<typename Container, typename T, typename U>
00180         bool operator<( const enumerable_thread_specific_iterator<Container,T>& i, 
00181                         const enumerable_thread_specific_iterator<Container,U>& j ) {
00182             return i.my_index<j.my_index;
00183         }
00184         
00185         template<typename Container, typename T, typename U>
00186         bool operator>( const enumerable_thread_specific_iterator<Container,T>& i, 
00187                         const enumerable_thread_specific_iterator<Container,U>& j ) {
00188             return j<i;
00189         }
00190         
00191         template<typename Container, typename T, typename U>
00192         bool operator>=( const enumerable_thread_specific_iterator<Container,T>& i, 
00193                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00194             return !(i<j);
00195         }
00196         
00197         template<typename Container, typename T, typename U>
00198         bool operator<=( const enumerable_thread_specific_iterator<Container,T>& i, 
00199                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00200             return !(j<i);
00201         }
00202         
00203         template<typename Container, typename T, typename U>
00204         ptrdiff_t operator-( const enumerable_thread_specific_iterator<Container,T>& i, 
00205                              const enumerable_thread_specific_iterator<Container,U>& j ) {
00206             return i.my_index-j.my_index;
00207         }
00208 
//! Input iterator that flattens a container of containers into one sequence.
/** Invariant: inner_iter is meaningful only while outer_iter != my_segcont->end();
    empty inner containers are always skipped, so outer_iter (when not at end)
    points at a non-empty segment and inner_iter at an element of it. */
00209     template<typename SegmentedContainer, typename Value >
00210         class segmented_iterator
00211 #if defined(_WIN64) && defined(_MSC_VER)
00212         : public std::iterator<std::input_iterator_tag, Value>
00213 #endif
00214         {
00215             template<typename C, typename T, typename U>
00216             friend bool operator==(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);
00217 
00218             template<typename C, typename T, typename U>
00219             friend bool operator!=(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);
00220             
00221             template<typename C, typename U> 
00222             friend class segmented_iterator;
00223 
00224             public:
00225 
00226                 segmented_iterator() {my_segcont = NULL;}
00227 
                // Binds to a container; starts at end() until an outer iterator is assigned.
00228                 segmented_iterator( const SegmentedContainer& _segmented_container ) : 
00229                     my_segcont(const_cast<SegmentedContainer*>(&_segmented_container)),
00230                     outer_iter(my_segcont->end()) { }
00231 
00232                 ~segmented_iterator() {}
00233 
00234                 typedef typename SegmentedContainer::iterator outer_iterator;
00235                 typedef typename SegmentedContainer::value_type InnerContainer;
00236                 typedef typename InnerContainer::iterator inner_iterator;
00237 
00238                 // STL support
00239                 typedef ptrdiff_t difference_type;
00240                 typedef Value value_type;
00241                 typedef typename SegmentedContainer::size_type size_type;
00242                 typedef Value* pointer;
00243                 typedef Value& reference;
00244                 typedef std::input_iterator_tag iterator_category;
00245 
00246                 // Copy Constructor
00247                 template<typename U>
00248                 segmented_iterator(const segmented_iterator<SegmentedContainer, U>& other) :
00249                     my_segcont(other.my_segcont),
00250                     outer_iter(other.outer_iter),
00251                     // can we assign a default-constructed iterator to inner if we're at the end?
00252                     inner_iter(other.inner_iter)
00253                 {}
00254 
00255                 // assignment
                // NOTE(review): for U != T, `this != &other` compares pointers of
                // unrelated types and would not compile if instantiated — confirm
                // this overload is only ever used with U == T.
00256                 template<typename U>
00257                 segmented_iterator& operator=( const segmented_iterator<SegmentedContainer, U>& other) {
00258                     if(this != &other) {
00259                         my_segcont = other.my_segcont;
00260                         outer_iter = other.outer_iter;
                        // inner_iter is only valid (and only copied) when not at end.
00261                         if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter;
00262                     }
00263                     return *this;
00264                 }
00265 
00266                 // allow assignment of outer iterator to segmented iterator.  Once it is
00267                 // assigned, move forward until a non-empty inner container is found or
00268                 // the end of the outer container is reached.
00269                 segmented_iterator& operator=(const outer_iterator& new_outer_iter) {
00270                     __TBB_ASSERT(my_segcont != NULL, NULL);
00271                     // check that this iterator points to something inside the segmented container
00272                     for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) {
00273                         if( !outer_iter->empty() ) {
00274                             inner_iter = outer_iter->begin();
00275                             break;
00276                         }
00277                     }
00278                     return *this;
00279                 }
00280 
00281                 // pre-increment
00282                 segmented_iterator& operator++() {
00283                     advance_me();
00284                     return *this;
00285                 }
00286 
00287                 // post-increment
00288                 segmented_iterator operator++(int) {
00289                     segmented_iterator tmp = *this;
00290                     operator++();
00291                     return tmp;
00292                 }
00293 
                // Comparison against a raw outer iterator: equal only at the same
                // segment AND at that segment's first element (or both at end).
00294                 bool operator==(const outer_iterator& other_outer) const {
00295                     __TBB_ASSERT(my_segcont != NULL, NULL);
00296                     return (outer_iter == other_outer &&
00297                             (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin()));
00298                 }
00299 
00300                 bool operator!=(const outer_iterator& other_outer) const {
00301                     return !operator==(other_outer);
00302 
00303                 }
00304 
00305                 // (i)* RHS
00306                 reference operator*() const {
00307                     __TBB_ASSERT(my_segcont != NULL, NULL);
00308                     __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing a pointer at end of container");
00309                     __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // should never happen
00310                     return *inner_iter;
00311                 }
00312 
00313                 // i->
00314                 pointer operator->() const { return &operator*();}
00315 
00316             private:
00317                 SegmentedContainer*             my_segcont;
00318                 outer_iterator outer_iter;
00319                 inner_iterator inner_iter;
00320 
                // Step to the next element, hopping over empty segments; leaves
                // outer_iter == end() when the sequence is exhausted.
00321                 void advance_me() {
00322                     __TBB_ASSERT(my_segcont != NULL, NULL);
00323                     __TBB_ASSERT(outer_iter != my_segcont->end(), NULL); // not true if there are no inner containers
00324                     __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // not true if the inner containers are all empty.
00325                     ++inner_iter;
00326                     while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) {
00327                         inner_iter = outer_iter->begin();
00328                     }
00329                 }
00330         };    // segmented_iterator
00331 
00332         template<typename SegmentedContainer, typename T, typename U>
00333         bool operator==( const segmented_iterator<SegmentedContainer,T>& i, 
00334                          const segmented_iterator<SegmentedContainer,U>& j ) {
00335             if(i.my_segcont != j.my_segcont) return false;
00336             if(i.my_segcont == NULL) return true;
00337             if(i.outer_iter != j.outer_iter) return false;
00338             if(i.outer_iter == i.my_segcont->end()) return true;
00339             return i.inner_iter == j.inner_iter;
00340         }
00341 
00342         // !=
00343         template<typename SegmentedContainer, typename T, typename U>
00344         bool operator!=( const segmented_iterator<SegmentedContainer,T>& i, 
00345                          const segmented_iterator<SegmentedContainer,U>& j ) {
00346             return !(i==j);
00347         }
00348 
00349         // empty template for following specializations
00350         template<ets_key_usage_type et>
00351         struct tls_manager {};
00352         
00354         template <>
00355         struct tls_manager<ets_no_key> {
00356             typedef size_t tls_key_t;
00357             static inline void create_key( tls_key_t &) { }
00358             static inline void destroy_key( tls_key_t & ) { }
00359             static inline void set_tls( tls_key_t &, void *  ) { }
00360             static inline void * get_tls( tls_key_t & ) { return (size_t)0; }
00361         };
00362 
//! Specialization that backs each container instance with one native TLS key:
//! Win32 TLS slots on Windows, pthread keys elsewhere.
//! NOTE(review): error returns of TlsAlloc/pthread_key_create are ignored —
//! key exhaustion would go undetected; confirm this is acceptable upstream.
00364         template <>
00365         struct tls_manager <ets_key_per_instance> {
00366 #if _WIN32||_WIN64
00367             typedef DWORD tls_key_t;
00368             static inline void create_key( tls_key_t &k) { k = TlsAlloc(); }
00369             static inline void destroy_key( tls_key_t &k) { TlsFree(k); }
00370             static inline void set_tls( tls_key_t &k, void * value) { TlsSetValue(k, (LPVOID)value); }
00371             static inline void * get_tls( tls_key_t &k ) { return (void *)TlsGetValue(k); }
00372 #else
00373             typedef pthread_key_t tls_key_t;
            // No destructor callback is registered: stored pointers reference
            // elements owned by the container, so nothing must be freed per thread.
00374             static inline void create_key( tls_key_t &k) { pthread_key_create(&k, NULL); }
00375             static inline void destroy_key( tls_key_t &k) { pthread_key_delete(k); }
00376             static inline void set_tls( tls_key_t &k, void * value) { pthread_setspecific(k, value); }
00377             static inline void * get_tls( tls_key_t &k ) { return pthread_getspecific(k); }
00378 #endif
00379         };
00380 
//! HashCompare policy for the thread-id -> element-index hash map.
00381         class thread_hash_compare {
00382         public:
00383             // using hack suggested by Arch to get value for thread id for hashing...
00384 #if _WIN32||_WIN64
00385             typedef DWORD thread_key;
00386 #else
00387             typedef pthread_t thread_key;
00388 #endif
            // Reinterprets an opaque tbb_thread::id as the native key type.
            // NOTE(review): assumes sizeof(tbb_thread::id) >= sizeof(thread_key);
            // if id were smaller, this memcpy would over-read — confirm against
            // the tbb_thread::id layout.
00389             static thread_key my_thread_key(const tbb_thread::id j) {
00390                 thread_key key_val;
00391                 memcpy(&key_val, &j, sizeof(thread_key));
00392                 return key_val;
00393             }
00394 
00395             bool equal( const thread_key j, const thread_key k) const {
00396                 return j == k;
00397             }
            // Identity-style hash: the integral key value truncated to unsigned long.
00398             unsigned long hash(const thread_key k) const {
00399                 return (unsigned long)k;
00400             }
00401         };
00402 
00403             // storage for initialization function pointer
        //! Type-erased interface around a user-supplied factory returning T.
        /** apply() invokes the factory; destroy() releases the heap-allocated
            leaf; make_copy() clones it for the container's copy constructor. */
00404         template<typename T>
00405         struct callback_base {
00406             virtual T apply( ) = 0;
00407             virtual void destroy( ) = 0;
00408             // need to be able to create copies of callback_base for copy constructor
00409             virtual callback_base* make_copy() = 0;
00410             // need virtual destructor to satisfy GCC compiler warning
00411             virtual ~callback_base() { }
00412         };
00413 
        //! Concrete callback wrapper holding a copy of the user functor.
        /** Instances live on the heap via tbb_allocator and must be released
            with destroy(), which mirrors new_callback()'s placement-new. */
00414         template <typename T, typename Functor>
00415         struct callback_leaf : public callback_base<T> {
00416             typedef Functor my_callback_type;
00417             typedef callback_leaf<T,Functor> my_type;
00418             typedef my_type* callback_pointer;
00419             typedef typename tbb::tbb_allocator<my_type> my_allocator_type;
00420             Functor f;
00421             callback_leaf( const Functor& f_) : f(f_) {
00422             }
00423 
            // Allocates raw storage, then constructs the leaf in place.
00424             static callback_pointer new_callback(const Functor& f_ ) {
00425                 void* new_void = my_allocator_type().allocate(1);
00426                 callback_pointer new_cb = new (new_void) callback_leaf<T,Functor>(f_); // placement new
00427                 return new_cb;
00428             }
00429 
00430             /* override */ callback_pointer make_copy() {
00431                 return new_callback( f );
00432             }
00433 
            // Runs the destructor then returns the storage to the allocator.
00434              /* override */ void destroy( ) {
00435                  callback_pointer my_ptr = this;
00436                  my_allocator_type().destroy(my_ptr);
00437                  my_allocator_type().deallocate(my_ptr,1);
00438              }
00439             /* override */ T apply() { return f(); }  // does copy construction of returned value.
00440         };
00441 
        //! Thin wrapper that publishes concurrent_hash_map's internal fast-path
        //! lookup as a public find(); presumably a lock-free read — behavior is
        //! whatever internal_fast_find provides (NULL when the key is absent —
        //! verify against the concurrent_hash_map implementation).
00442         template<typename Key, typename T, typename HC, typename A>
00443         class ets_concurrent_hash_map : public tbb::concurrent_hash_map<Key, T, HC, A> {
00444         public:
00445             typedef tbb::concurrent_hash_map<Key, T, HC, A> base_type;
00446             typedef typename base_type::const_pointer const_pointer;
00447             typedef typename base_type::key_type key_type;
00448             const_pointer find( const key_type &k ) {
00449                 return internal_fast_find( k );
00450             } // make public
00451         };
00452     
00453     } // namespace internal
00455 
00457     template <typename T, 
00458               typename Allocator=cache_aligned_allocator<T>, 
00459               ets_key_usage_type ETS_key_type=ets_no_key > 
00460     class enumerable_thread_specific { 
00461 
00462         template<typename U, typename A, ets_key_usage_type C> friend class enumerable_thread_specific;
00463     
00464         typedef internal::tls_manager< ETS_key_type > my_tls_manager;
00465 
        //! Element holder padded so its size is a whole multiple of
        //! internal::NFS_MaxLineSize, keeping adjacent per-thread elements on
        //! separate cache lines (avoids false sharing).
00467         template<typename U>
00468         struct padded_element {
00469             U value;
            // Rounds sizeof(U) up to the next NFS_MaxLineSize boundary.
00470             char padding[ ( (sizeof(U) - 1) / internal::NFS_MaxLineSize + 1 ) * internal::NFS_MaxLineSize - sizeof(U) ];
00471             padded_element(const U &v) : value(v) {}
00472             padded_element() {}
00473         };
00474     
        //! blocked_range over ETS iterators with container-style value typedefs,
        //! usable directly as a TBB parallel range (including the splitting ctor).
00476         template<typename I>
00477         class generic_range_type: public blocked_range<I> {
00478         public:
00479             typedef T value_type;
00480             typedef T& reference;
00481             typedef const T& const_reference;
00482             typedef I iterator;
00483             typedef ptrdiff_t difference_type;
00484             generic_range_type( I begin_, I end_, size_t grainsize = 1) : blocked_range<I>(begin_,end_,grainsize) {} 
            // Converting constructor, e.g. range_type -> const_range_type.
00485             template<typename U>
00486             generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {} 
            // Splitting constructor used by parallel algorithms.
00487             generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
00488         };
00489     
        // Storage: one padded element per thread in a concurrent_vector, plus a
        // concurrent hash map from native thread key to that element's index.
00490         typedef typename Allocator::template rebind< padded_element<T> >::other padded_allocator_type;
00491         typedef tbb::concurrent_vector< padded_element<T>, padded_allocator_type > internal_collection_type;
00492         typedef typename internal_collection_type::size_type hash_table_index_type; // storing array indices rather than iterators to simplify
00493         // copying the hash table that correlates thread IDs with concurrent vector elements.
00494         
00495         typedef typename Allocator::template rebind< std::pair< typename internal::thread_hash_compare::thread_key, hash_table_index_type > >::other hash_element_allocator;
00496         typedef internal::ets_concurrent_hash_map< typename internal::thread_hash_compare::thread_key, hash_table_index_type, internal::thread_hash_compare, hash_element_allocator > thread_to_index_type;
00497 
        // Native TLS key caching a pointer to this thread's element (no-op key
        // when ETS_key_type == ets_no_key).
00498         typename my_tls_manager::tls_key_t my_key;

        // Destroy and recreate the TLS key; used by clear() so that every
        // thread's cached element pointer is invalidated at once.
00500         void reset_key() {
00501             my_tls_manager::destroy_key(my_key);
00502             my_tls_manager::create_key(my_key); 
00503         }
00504 
        // Optional user factory invoked to build each thread's initial value;
        // NULL when an exemplar value is used instead.
00505         internal::callback_base<T> *my_finit_callback;

00507         // need to use a pointed-to exemplar because T may not be assignable.
00508         // using tbb_allocator instead of padded_element_allocator because we may be
00509         // copying an exemplar from one instantiation of ETS to another with a different
00510         // allocator.
00511         typedef typename tbb::tbb_allocator<padded_element<T> > exemplar_allocator_type;
        // Heap-allocate a padded copy of my_value to serve as the exemplar.
00512         static padded_element<T> * create_exemplar(const T& my_value) {
00513             padded_element<T> *new_exemplar = 0;
00514             // void *new_space = padded_allocator_type().allocate(1);
00515             void *new_space = exemplar_allocator_type().allocate(1);
00516             new_exemplar = new(new_space) padded_element<T>(my_value);
00517             return new_exemplar;
00518         }

        // Heap-allocate a default-constructed exemplar.
00520         static padded_element<T> *create_exemplar( ) {
00521             // void *new_space = padded_allocator_type().allocate(1);
00522             void *new_space = exemplar_allocator_type().allocate(1);
00523             padded_element<T> *new_exemplar = new(new_space) padded_element<T>( );
00524             return new_exemplar;
00525         }

        // Destroy and deallocate an exemplar created by either overload above.
00527         static void free_exemplar(padded_element<T> *my_ptr) {
00528             // padded_allocator_type().destroy(my_ptr);
00529             // padded_allocator_type().deallocate(my_ptr,1);
00530             exemplar_allocator_type().destroy(my_ptr);
00531             exemplar_allocator_type().deallocate(my_ptr,1);
00532         }

        // Prototype value copied into each new thread's slot (NULL when a
        // factory callback is used instead).
00534         padded_element<T>* my_exemplar_ptr;

        // One padded element per participating thread, plus the thread-id map.
00536         internal_collection_type my_locals;
00537         thread_to_index_type my_hash_tbl;
00538     
00539     public:
00540     
        //! Basic types
00542         typedef Allocator allocator_type;
00543         typedef T value_type;
00544         typedef T& reference;
00545         typedef const T& const_reference;
00546         typedef T* pointer;
00547         typedef const T* const_pointer;
00548         typedef typename internal_collection_type::size_type size_type;
00549         typedef typename internal_collection_type::difference_type difference_type;
00550     
00551         // Iterator types
00552         typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator;
00553         typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator;

00555         // Parallel range types
00556         typedef generic_range_type< iterator > range_type;
00557         typedef generic_range_type< const_iterator > const_range_type;
00558     
        //! Default constructor: each thread's value starts as a
        //! default-constructed T (held via the exemplar), no factory callback.
00560         enumerable_thread_specific() : my_finit_callback(0) { 
00561             my_exemplar_ptr = create_exemplar();
00562             my_tls_manager::create_key(my_key); 
00563         }
00564 
00566         // Finit should be a function taking 0 parameters and returning a T
        //! Constructor with a per-thread initializer: _finit() is invoked once
        //! per thread to produce that thread's initial value.
00567         template <typename Finit>
00568         enumerable_thread_specific( Finit _finit )
00569         {
00570             my_finit_callback = internal::callback_leaf<T,Finit>::new_callback( _finit );
00571             my_tls_manager::create_key(my_key);
00572             my_exemplar_ptr = 0; // don't need exemplar if function is provided
00573         }
00574     
        //! Constructor with an exemplar: each thread's value is copy-constructed
        //! from _exemplar on first access.
00576         enumerable_thread_specific(const T &_exemplar) : my_finit_callback(0) {
00577             my_exemplar_ptr = create_exemplar(_exemplar);
00578             my_tls_manager::create_key(my_key); 
00579         }
00580     
        //! Destructor: releases the TLS key, the factory callback (if any) and
        //! the exemplar (if any); elements themselves are owned by my_locals.
00582         ~enumerable_thread_specific() { 
00583             my_tls_manager::destroy_key(my_key); 
00584             if(my_finit_callback) {
00585                 my_finit_callback->destroy();
00586             }
00587             if(my_exemplar_ptr)
00588             {
00589                 free_exemplar(my_exemplar_ptr);
00590             }
00591         }
00592       
        //! Returns a reference to the calling thread's element, creating it on
        //! first access; convenience overload that discards the 'exists' flag.
00594         reference local() {
00595             bool exists;
00596             return local(exists);
00597         }
00598 
        //! Returns this thread's element; sets exists=false only when a brand
        //! new element had to be created for the calling thread.
        /** Lookup order: (1) per-thread TLS cache, (2) non-blocking hash-map
            find, (3) insert under an accessor, creating the element if the
            thread really is new. The element's index (not an iterator) is
            stored so the table can be copied between containers. */
00600         reference local(bool& exists)  {
            // Fast path: TLS already caches a pointer to this thread's value.
00601             if ( pointer local_ptr = static_cast<pointer>(my_tls_manager::get_tls(my_key)) ) {
00602                 exists = true;
00603                return *local_ptr;
00604             }
00605             hash_table_index_type local_index;
00606             typename internal::thread_hash_compare::thread_key my_t_key = internal::thread_hash_compare::my_thread_key(tbb::this_tbb_thread::get_id());
00607             {
                // Medium path: thread already registered in the hash table
                // (e.g. TLS was reset by clear()).
00608                 typename thread_to_index_type::const_pointer my_existing_entry;
00609                 my_existing_entry = my_hash_tbl.find(my_t_key);
00610                 if(my_existing_entry) {
00611                     exists = true;
00612                     local_index = my_existing_entry->second;
00613                 }
00614                 else {

00616                     // see if the table entry can be found by accessor
00617                     typename thread_to_index_type::accessor a;
00618                     if(!my_hash_tbl.insert(a, my_t_key)) {
00619                         exists = true;
00620                         local_index = a->second;
00621                     }
00622                     else {
00623                         // create new entry
00624                         exists = false;
                        // Initial value comes from the factory callback when one
                        // was supplied, otherwise from the exemplar.
00625                         if(my_finit_callback) {
00626                             // convert iterator to array index
00627 #if TBB_DEPRECATED
00628                             local_index = my_locals.push_back(my_finit_callback->apply());
00629 #else
00630                             local_index = my_locals.push_back(my_finit_callback->apply()) - my_locals.begin();
00631 #endif
00632                         }
00633                         else {
00634                             // convert iterator to array index
00635 #if TBB_DEPRECATED
00636                             local_index = my_locals.push_back(*my_exemplar_ptr);
00637 #else
00638                             local_index = my_locals.push_back(*my_exemplar_ptr) - my_locals.begin();
00639 #endif
00640                         }
00641                         // insert into hash table
00642                         a->second = local_index;
00643                     }
00644                 }
00645             }

            // Cache the element's address in TLS for subsequent fast-path hits.
00647             reference local_ref = (my_locals[local_index].value);
00648             my_tls_manager::set_tls( my_key, static_cast<void *>(&local_ref) );
00649             return local_ref;
00650         } // local
00651 
        //! Number of elements created so far (one per thread that called local()).
00653         size_type size() const { return my_locals.size(); }
    
        //! True when no thread has created an element yet.
00656         bool empty() const { return my_locals.empty(); }
    
        //! Iteration over all per-thread elements (not thread-safe against
        //! concurrent element creation).
00659         iterator begin() { return iterator( my_locals, 0 ); }
00661         iterator end() { return iterator(my_locals, my_locals.size() ); }
    
00664         const_iterator begin() const { return const_iterator(my_locals, 0); }
    
00667         const_iterator end() const { return const_iterator(my_locals, my_locals.size()); }

        //! Parallel ranges over the per-thread elements.
00670         range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); } 
        
00673         const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); }
00674     
        //! Removes every per-thread element; recreating the TLS key ensures no
        //! thread keeps a stale cached pointer into the cleared storage.
00676         void clear() {
00677             my_locals.clear();
00678             my_hash_tbl.clear();
00679             reset_key();
00680             // callback is not destroyed
00681             // exemplar is not destroyed
00682         }
00683 
00684         // STL container methods
00685         // copy constructor
00686 
00687     private:
00688 
00689         template<typename U, typename A2, ets_key_usage_type C2>
00690         void
00691         internal_copy_construct( const enumerable_thread_specific<U, A2, C2>& other) {
00692             typedef typename tbb::enumerable_thread_specific<U, A2, C2> other_type;
00693             for(typename other_type::const_iterator ci = other.begin(); ci != other.end(); ++ci) {
00694                 my_locals.push_back(*ci);
00695             }
00696             if(other.my_finit_callback) {
00697                 my_finit_callback = other.my_finit_callback->make_copy();
00698             }
00699             else {
00700                 my_finit_callback = 0;
00701             }
00702             if(other.my_exemplar_ptr) {
00703                 my_exemplar_ptr = create_exemplar(other.my_exemplar_ptr->value);
00704             }
00705             else {
00706                 my_exemplar_ptr = 0;
00707             }
00708             my_tls_manager::create_key(my_key);
00709         }
00710 
00711     public:
00712 
        //! Templated copy constructor: copy from a container with a different
        //! element type, allocator, or key-caching policy.
        /** The thread->index hash table is copied in the initializer list;
            values, callback, exemplar and a fresh TLS key come from
            internal_copy_construct. */
        template<typename U, typename Alloc, ets_key_usage_type Cachetype>
        enumerable_thread_specific( const enumerable_thread_specific<U, Alloc, Cachetype>& other ) : my_hash_tbl(other.my_hash_tbl) 
        {   // Have to do push_back because the contained elements are not necessarily assignable.
            internal_copy_construct(other);
        }
00718 
        // non-templatized version
        //! Copy constructor.
        /** Copies the thread->index hash table, then delegates values,
            callback, exemplar and TLS-key creation to internal_copy_construct. */
        enumerable_thread_specific( const enumerable_thread_specific& other ) : my_hash_tbl(other.my_hash_tbl) 
        {
            internal_copy_construct(other);
        }
00724 
00725     private:
00726 
        //! Assignment helper shared by both operator= overloads.
        /** Discards this container's state and rebuilds it from other:
            thread->index hash table, each stored value (push_back, because T
            need not be assignable), then clones other's finit callback and
            exemplar.  clear() also resets the TLS key, so threads re-register
            on their next call to local().  Self-assignment is detected by
            address comparison (the two operands may have different static
            types, hence the void* casts). */
        template<typename U, typename A2, ets_key_usage_type C2>
        enumerable_thread_specific &
        internal_assign(const enumerable_thread_specific<U, A2, C2>& other) {
            typedef typename tbb::enumerable_thread_specific<U, A2, C2> other_type;
            if(static_cast<void *>( this ) != static_cast<const void *>( &other )) {
                this->clear(); // resets TLS key
                my_hash_tbl = other.my_hash_tbl;
                // cannot use assign because T may not be assignable.
                for(typename other_type::const_iterator ci = other.begin(); ci != other.end(); ++ci) {
                    my_locals.push_back(*ci);
                }

                // Release any callback/exemplar this container owned before
                // adopting copies of other's (which may be absent).
                if(my_finit_callback) {
                    my_finit_callback->destroy();
                    my_finit_callback = 0;
                }
                if(my_exemplar_ptr) {
                    free_exemplar(my_exemplar_ptr);
                    my_exemplar_ptr = 0;
                }
                if(other.my_finit_callback) {
                    my_finit_callback = other.my_finit_callback->make_copy();
                }

                if(other.my_exemplar_ptr) {
                    my_exemplar_ptr = create_exemplar(other.my_exemplar_ptr->value);
                }
            }
            return *this;
        }
00757 
00758     public:
00759 
00760         // assignment
00761         enumerable_thread_specific& operator=(const enumerable_thread_specific& other) {
00762             return internal_assign(other);
00763         }
00764 
00765         template<typename U, typename Alloc, ets_key_usage_type Cachetype>
00766         enumerable_thread_specific& operator=(const enumerable_thread_specific<U, Alloc, Cachetype>& other)
00767         {
00768             return internal_assign(other);
00769         }
00770 
00771     private:
00772 
00773         // combine_func_t has signature T(T,T) or T(const T&, const T&)
00774         template <typename combine_func_t>
00775         T internal_combine(typename internal_collection_type::const_range_type r, combine_func_t f_combine) {
00776             if(r.is_divisible()) {
00777                 typename internal_collection_type::const_range_type r2(r,split());
00778                 return f_combine(internal_combine(r2, f_combine), internal_combine(r, f_combine));
00779             }
00780             if(r.size() == 1) {
00781                 return r.begin()->value;
00782             }
00783             typename internal_collection_type::const_iterator i2 = r.begin();
00784             ++i2;
00785             return f_combine(r.begin()->value, i2->value);
00786         }
00787 
00788     public:
00789 
00790         // combine_func_t has signature T(T,T) or T(const T&, const T&)
00791         template <typename combine_func_t>
00792         T combine(combine_func_t f_combine) {
00793             if(my_locals.begin() == my_locals.end()) {
00794                 if(my_finit_callback) {
00795                     return my_finit_callback->apply();
00796                 }
00797                 return (*my_exemplar_ptr).value;
00798             }
00799             typename internal_collection_type::const_range_type r(my_locals.begin(), my_locals.end(), (size_t)2);
00800             return internal_combine(r, f_combine);
00801         }
00802 
00803         // combine_func_t has signature void(T) or void(const T&)
00804         template <typename combine_func_t>
00805         void combine_each(combine_func_t f_combine) {
00806             for(const_iterator ci = begin(); ci != end(); ++ci) {
00807                 f_combine( *ci );
00808             }
00809         }
00810     }; // enumerable_thread_specific
00811 
00812     template< typename Container >
00813     class flattened2d {
00814 
00815         // This intermediate typedef is to address issues with VC7.1 compilers
00816         typedef typename Container::value_type conval_type;
00817 
00818     public:
00819 
00821         typedef typename conval_type::size_type size_type;
00822         typedef typename conval_type::difference_type difference_type;
00823         typedef typename conval_type::allocator_type allocator_type;
00824         typedef typename conval_type::value_type value_type;
00825         typedef typename conval_type::reference reference;
00826         typedef typename conval_type::const_reference const_reference;
00827         typedef typename conval_type::pointer pointer;
00828         typedef typename conval_type::const_pointer const_pointer;
00829 
00830         typedef typename internal::segmented_iterator<Container, value_type> iterator;
00831         typedef typename internal::segmented_iterator<Container, const value_type> const_iterator;
00832 
00833         flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) : 
00834             my_container(const_cast<Container*>(&c)), my_begin(b), my_end(e) { }
00835 
00836         flattened2d( const Container &c ) : 
00837             my_container(const_cast<Container*>(&c)), my_begin(c.begin()), my_end(c.end()) { }
00838 
00839         iterator begin() { return iterator(*my_container) = my_begin; }
00840         iterator end() { return iterator(*my_container) = my_end; }
00841         const_iterator begin() const { return const_iterator(*my_container) = my_begin; }
00842         const_iterator end() const { return const_iterator(*my_container) = my_end; }
00843 
00844         size_type size() const {
00845             size_type tot_size = 0;
00846             for(typename Container::const_iterator i = my_begin; i != my_end; ++i) {
00847                 tot_size += i->size();
00848             }
00849             return tot_size;
00850         }
00851 
00852     private:
00853 
00854         Container *my_container;
00855         typename Container::const_iterator my_begin;
00856         typename Container::const_iterator my_end;
00857 
00858     };
00859 
00860     template <typename Container>
00861     flattened2d<Container> flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) {
00862         return flattened2d<Container>(c, b, e);
00863     }
00864 
00865     template <typename Container>
00866     flattened2d<Container> flatten2d(const Container &c) {
00867         return flattened2d<Container>(c);
00868     }
00869 
00870 } // namespace tbb
00871 
00872 #endif

Copyright © 2005-2009 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.