atomic.h

/*
    Copyright 2005-2009 Intel Corporation.  All Rights Reserved.

    The source code contained or described herein and all documents related
    to the source code ("Material") are owned by Intel Corporation or its
    suppliers or licensors.  Title to the Material remains with Intel
    Corporation or its suppliers and licensors.  The Material is protected
    by worldwide copyright laws and treaty provisions.  No part of the
    Material may be used, copied, reproduced, modified, published, uploaded,
    posted, transmitted, distributed, or disclosed in any way without
    Intel's prior express written permission.

    No license under any patent, copyright, trade secret or other
    intellectual property right is granted to or conferred upon you by
    disclosure or delivery of the Materials, either expressly, by
    implication, inducement, estoppel or otherwise.  Any license under such
    intellectual property rights must be express and approved by Intel in
    writing.
*/

#ifndef __TBB_atomic_H
#define __TBB_atomic_H

#include <cstddef>
#include "tbb_stddef.h"

#if _MSC_VER
#define __TBB_LONG_LONG __int64
#else
#define __TBB_LONG_LONG long long
#endif /* _MSC_VER */

#include "tbb_machine.h"

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    // Workaround for overzealous compiler warnings
    #pragma warning (push)
    #pragma warning (disable: 4244 4267)
#endif

namespace tbb {

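//! Specifies the memory fencing applied by an atomic operation: a full fence, an acquire fence, or a release fence.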
enum memory_semantics {
    __TBB_full_fence,
    acquire,
    release
};

namespace internal {

#if __GNUC__ || __SUNPRO_CC
#define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f  __attribute__ ((aligned(a)));
#elif defined(__INTEL_COMPILER)||_MSC_VER >= 1300
#define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f;
#else
#error Do not know syntax for forcing alignment.
#endif /* __GNUC__ */

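//! atomic_rep<S> supplies suitably aligned storage and the machine word type for an S-byte atomic value.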
template<size_t S>
struct atomic_rep;           // Primary template declared, but never defined.

template<>
struct atomic_rep<1> {       // Specialization
    typedef int8_t word;
    int8_t value;
};
template<>
struct atomic_rep<2> {       // Specialization
    typedef int16_t word;
    __TBB_DECL_ATOMIC_FIELD(int16_t,value,2)
};
template<>
struct atomic_rep<4> {       // Specialization
#if _MSC_VER && __TBB_WORDSIZE==4
    // Work-around that avoids spurious /Wp64 warnings
    typedef intptr_t word;
#else
    typedef int32_t word;
#endif
    __TBB_DECL_ATOMIC_FIELD(int32_t,value,4)
};
template<>
struct atomic_rep<8> {       // Specialization
    typedef int64_t word;
    __TBB_DECL_ATOMIC_FIELD(int64_t,value,8)
};

template<size_t Size, memory_semantics M>
struct atomic_traits;        // Primary template declared, but not defined.

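// The macros below generate atomic_traits specializations that forward to the machine-level
// primitives declared in tbb_machine.h (__TBB_CompareAndSwap*, __TBB_FetchAndAdd*, __TBB_FetchAndStore*):
// per-fence variants when the port defines __TBB_DECL_FENCED_ATOMICS, otherwise the fully fenced ones.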
#define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M)                         \
    template<> struct atomic_traits<S,M> {                               \
        typedef atomic_rep<S>::word word;                               \
        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {\
            return __TBB_CompareAndSwap##S##M(location,new_value,comparand);    \
        }                                                                       \
        inline static word fetch_and_add( volatile void* location, word addend ) { \
            return __TBB_FetchAndAdd##S##M(location,addend);                    \
        }                                                                       \
        inline static word fetch_and_store( volatile void* location, word value ) {\
            return __TBB_FetchAndStore##S##M(location,value);                   \
        }                                                                       \
    };

#define __TBB_DECL_ATOMIC_PRIMITIVES(S)                                  \
    template<memory_semantics M>                                         \
    struct atomic_traits<S,M> {                                          \
        typedef atomic_rep<S>::word word;                               \
        inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {\
            return __TBB_CompareAndSwap##S(location,new_value,comparand);       \
        }                                                                       \
        inline static word fetch_and_add( volatile void* location, word addend ) { \
            return __TBB_FetchAndAdd##S(location,addend);                       \
        }                                                                       \
        inline static word fetch_and_store( volatile void* location, word value ) {\
            return __TBB_FetchAndStore##S(location,value);                      \
        }                                                                       \
    };

#if __TBB_DECL_FENCED_ATOMICS
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,__TBB_full_fence)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,acquire)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,release)
__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,release)
#else
__TBB_DECL_ATOMIC_PRIMITIVES(1)
__TBB_DECL_ATOMIC_PRIMITIVES(2)
__TBB_DECL_ATOMIC_PRIMITIVES(4)
__TBB_DECL_ATOMIC_PRIMITIVES(8)
#endif

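//! Additive inverse of 1 for type T, written with binary minus to avoid compiler warnings for unsigned types.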
#define __TBB_MINUS_ONE(T) (T(T(0)-T(1)))

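//! Base class providing the load, store, fetch_and_store, and compare_and_swap operations of atomic<T>, but no arithmetic.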
template<typename T>
struct atomic_impl {
protected:
    atomic_rep<sizeof(T)> rep;
private:
    union converter {
        T value;
        typename atomic_rep<sizeof(T)>::word bits;
    };
public:
    typedef T value_type;

    template<memory_semantics M>
    value_type fetch_and_store( value_type value ) {
        converter u, w;
        u.value = value;
        w.bits = internal::atomic_traits<sizeof(value_type),M>::fetch_and_store(&rep.value,u.bits);
        return w.value;
    }

    value_type fetch_and_store( value_type value ) {
        return fetch_and_store<__TBB_full_fence>(value);
    }

    template<memory_semantics M>
    value_type compare_and_swap( value_type value, value_type comparand ) {
        converter u, v, w;
        u.value = value;
        v.value = comparand;
        w.bits = internal::atomic_traits<sizeof(value_type),M>::compare_and_swap(&rep.value,u.bits,v.bits);
        return w.value;
    }

    value_type compare_and_swap( value_type value, value_type comparand ) {
        return compare_and_swap<__TBB_full_fence>(value,comparand);
    }

    operator value_type() const volatile {                // volatile qualifier here for backwards compatibility
        converter w;
        w.bits = __TBB_load_with_acquire( rep.value );
        return w.value;
    }

protected:
    value_type store_with_release( value_type rhs ) {
        converter u;
        u.value = rhs;
        __TBB_store_with_release(rep.value,u.bits);
        return rhs;
    }
};

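//! Adds fetch_and_add, fetch_and_increment/decrement, and the arithmetic operators on top of atomic_impl.
//! StepType sets the addend scaling: char (step 1) for integral types, T for T* so that pointer arithmetic advances in units of sizeof(T).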
template<typename I, typename D, typename StepType>
struct atomic_impl_with_arithmetic: atomic_impl<I> {
public:
    typedef I value_type;

    template<memory_semantics M>
    value_type fetch_and_add( D addend ) {
        return value_type(internal::atomic_traits<sizeof(value_type),M>::fetch_and_add( &this->rep.value, addend*sizeof(StepType) ));
    }

    value_type fetch_and_add( D addend ) {
        return fetch_and_add<__TBB_full_fence>(addend);
    }

    template<memory_semantics M>
    value_type fetch_and_increment() {
        return fetch_and_add<M>(1);
    }

    value_type fetch_and_increment() {
        return fetch_and_add(1);
    }

    template<memory_semantics M>
    value_type fetch_and_decrement() {
        return fetch_and_add<M>(__TBB_MINUS_ONE(D));
    }

    value_type fetch_and_decrement() {
        return fetch_and_add(__TBB_MINUS_ONE(D));
    }

public:
    value_type operator+=( D addend ) {
        return fetch_and_add(addend)+addend;
    }

    value_type operator-=( D addend ) {
        // Additive inverse of addend computed using binary minus,
        // instead of unary minus, for sake of avoiding compiler warnings.
        return operator+=(D(0)-addend);
    }

    value_type operator++() {
        return fetch_and_add(1)+1;
    }

    value_type operator--() {
        return fetch_and_add(__TBB_MINUS_ONE(D))-1;
    }

    value_type operator++(int) {
        return fetch_and_add(1);
    }

    value_type operator--(int) {
        return fetch_and_add(__TBB_MINUS_ONE(D));
    }
};

#if __TBB_WORDSIZE == 4
// Platforms with 32-bit hardware require special effort for 64-bit loads and stores.
#if defined(__INTEL_COMPILER)||!defined(_MSC_VER)||_MSC_VER>=1400

template<>
inline atomic_impl<__TBB_LONG_LONG>::operator atomic_impl<__TBB_LONG_LONG>::value_type() const volatile {
    return __TBB_Load8(&rep.value);
}

template<>
inline atomic_impl<unsigned __TBB_LONG_LONG>::operator atomic_impl<unsigned __TBB_LONG_LONG>::value_type() const volatile {
    return __TBB_Load8(&rep.value);
}

template<>
inline atomic_impl<__TBB_LONG_LONG>::value_type atomic_impl<__TBB_LONG_LONG>::store_with_release( value_type rhs ) {
    __TBB_Store8(&rep.value,rhs);
    return rhs;
}

template<>
inline atomic_impl<unsigned __TBB_LONG_LONG>::value_type atomic_impl<unsigned __TBB_LONG_LONG>::store_with_release( value_type rhs ) {
    __TBB_Store8(&rep.value,rhs);
    return rhs;
}

#endif /* defined(__INTEL_COMPILER)||!defined(_MSC_VER)||_MSC_VER>=1400 */
#endif /* __TBB_WORDSIZE==4 */

} /* internal */

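//! Primary template for atomic<T>; covers types without arithmetic. The specializations below add arithmetic for integral and pointer types.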
template<typename T>
struct atomic: internal::atomic_impl<T> {
    T operator=( T rhs ) {
        // "this" required here in strict ISO C++ because store_with_release is a dependent name
        return this->store_with_release(rhs);
    }
    atomic<T>& operator=( const atomic<T>& rhs ) {this->store_with_release(rhs); return *this;}
};

#define __TBB_DECL_ATOMIC(T) \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {  \
        T operator=( T rhs ) {return store_with_release(rhs);}  \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}  \
    };

#if defined(__INTEL_COMPILER)||!defined(_MSC_VER)||_MSC_VER>=1400
__TBB_DECL_ATOMIC(__TBB_LONG_LONG)
__TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG)
#else
// Some old versions of MSVC cannot correctly compile templates with "long long".
#endif /* defined(__INTEL_COMPILER)||!defined(_MSC_VER)||_MSC_VER>=1400 */

__TBB_DECL_ATOMIC(long)
__TBB_DECL_ATOMIC(unsigned long)

#if defined(_MSC_VER) && __TBB_WORDSIZE==4
/* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option.
   It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T)
   with an operator=(U) that explicitly converts the U to a T.  Types T and U should be
   type synonyms on the platform.  Type U should be the wider variant of T from the
   perspective of /Wp64. */
#define __TBB_DECL_ATOMIC_ALT(T,U) \
    template<> struct atomic<T>: internal::atomic_impl_with_arithmetic<T,T,char> {  \
        T operator=( U rhs ) {return store_with_release(T(rhs));}  \
        atomic<T>& operator=( const atomic<T>& rhs ) {store_with_release(rhs); return *this;}  \
    };
__TBB_DECL_ATOMIC_ALT(unsigned,size_t)
__TBB_DECL_ATOMIC_ALT(int,ptrdiff_t)
#else
__TBB_DECL_ATOMIC(unsigned)
__TBB_DECL_ATOMIC(int)
#endif /* defined(_MSC_VER) && __TBB_WORDSIZE==4 */

__TBB_DECL_ATOMIC(unsigned short)
__TBB_DECL_ATOMIC(short)
__TBB_DECL_ATOMIC(char)
__TBB_DECL_ATOMIC(signed char)
__TBB_DECL_ATOMIC(unsigned char)

#if !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED)
__TBB_DECL_ATOMIC(wchar_t)
#endif /* !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED) */

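//! Specialization for pointers: arithmetic advances in units of sizeof(T), and operator-> is provided.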
template<typename T> struct atomic<T*>: internal::atomic_impl_with_arithmetic<T*,ptrdiff_t,T> {
    T* operator=( T* rhs ) {
        // "this" required here in strict ISO C++ because store_with_release is a dependent name
        return this->store_with_release(rhs);
    }
    atomic<T*>& operator=( const atomic<T*>& rhs ) {
        this->store_with_release(rhs); return *this;
    }
    T* operator->() const {
        return (*this);
    }
};

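//! Specialization for atomic<void*>: no arithmetic and no operator->, since void* supports neither.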
template<> struct atomic<void*>: internal::atomic_impl<void*> {
    void* operator=( void* rhs ) {
        // "this" required here in strict ISO C++ because store_with_release is a dependent name
        return this->store_with_release(rhs);
    }
    atomic<void*>& operator=( const atomic<void*>& rhs ) {
        this->store_with_release(rhs); return *this;
    }
};

} // namespace tbb

#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
    #pragma warning (pop)
#endif // warnings 4244, 4267 are back

#endif /* __TBB_atomic_H */
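
Below is a minimal usage sketch, not part of the header, showing how the interface declared above is typically used. The include path "tbb/atomic.h", the worker function, and the variable names are illustrative assumptions only.

#include "tbb/atomic.h"   // assumed install layout; the file listed above
#include <cstdio>

static int array[4];
tbb::atomic<int>  counter;   // namespace-scope statics are zero-initialized
tbb::atomic<int*> slot;      // pointer specialization: arithmetic steps by sizeof(int)

void worker() {              // hypothetical function, possibly run by several threads
    counter++;                                        // atomic post-increment, returns the prior value
    int old  = counter.fetch_and_add(10);             // full-fence read-modify-write, returns the prior value
    int seen = counter.compare_and_swap(0, old + 10); // store 0 only if the current value equals old+10
    counter.fetch_and_store<tbb::release>(42);        // explicitly fenced variant via the template parameter
    slot += 2;                                        // pointer arithmetic: advances by 2*sizeof(int) bytes
    (void)seen;
}

int main() {
    counter = 0;              // operator= performs a release store
    slot = array;
    worker();
    std::printf("counter=%d\n", int(counter));  // implicit conversion performs an acquire load
    return 0;
}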
