Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
tbb_machine.h
1 /*
2  Copyright (c) 2005-2020 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #ifndef __TBB_machine_H
18 #define __TBB_machine_H
19 
113 #include "tbb_stddef.h"
114 
115 namespace tbb {
116 namespace internal { //< @cond INTERNAL
117 
119 // Overridable helpers declarations
120 //
121 // A machine/*.h file may choose to define these templates, otherwise it must
122 // request default implementation by setting appropriate __TBB_USE_GENERIC_XXX macro(s).
123 //
124 template <typename T, std::size_t S>
125 struct machine_load_store;
126 
127 template <typename T, std::size_t S>
128 struct machine_load_store_relaxed;
129 
130 template <typename T, std::size_t S>
131 struct machine_load_store_seq_cst;
132 //
133 // End of overridable helpers declarations
135 
136 template<size_t S> struct atomic_selector;
137 
138 template<> struct atomic_selector<1> {
139  typedef int8_t word;
140  inline static word fetch_store ( volatile void* location, word value );
141 };
142 
143 template<> struct atomic_selector<2> {
144  typedef int16_t word;
145  inline static word fetch_store ( volatile void* location, word value );
146 };
147 
148 template<> struct atomic_selector<4> {
149 #if _MSC_VER && !_WIN64
150  // Work-around that avoids spurious /Wp64 warnings
151  typedef intptr_t word;
152 #else
153  typedef int32_t word;
154 #endif
155  inline static word fetch_store ( volatile void* location, word value );
156 };
157 
158 template<> struct atomic_selector<8> {
159  typedef int64_t word;
160  inline static word fetch_store ( volatile void* location, word value );
161 };
162 
163 }} //< namespaces internal @endcond, tbb
164 
165 #define __TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(M) \
166  inline void __TBB_machine_generic_store8##M(volatile void *ptr, int64_t value) { \
167  for(;;) { \
168  int64_t result = *(volatile int64_t *)ptr; \
169  if( __TBB_machine_cmpswp8##M(ptr,value,result)==result ) break; \
170  } \
171  } \
172 
173 #define __TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(M) \
174  inline int64_t __TBB_machine_generic_load8##M(const volatile void *ptr) { \
175  /* Comparand and new value may be anything, they only must be equal, and */ \
176  /* the value should have a low probability to be actually found in 'location'.*/ \
177  const int64_t anyvalue = 2305843009213693951LL; \
178  return __TBB_machine_cmpswp8##M(const_cast<volatile void *>(ptr),anyvalue,anyvalue); \
179  } \
180 
181 // The set of allowed values for __TBB_ENDIANNESS (see above for details)
182 #define __TBB_ENDIAN_UNSUPPORTED -1
183 #define __TBB_ENDIAN_LITTLE 0
184 #define __TBB_ENDIAN_BIG 1
185 #define __TBB_ENDIAN_DETECT 2
186 
187 #if _WIN32||_WIN64
188 
189 #ifdef _MANAGED
190 #pragma managed(push, off)
191 #endif
192 
193  #if __MINGW64__ || __MINGW32__
194  extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void );
195  #define __TBB_Yield() SwitchToThread()
196  #if (TBB_USE_GCC_BUILTINS && __TBB_GCC_BUILTIN_ATOMICS_PRESENT)
197  #include "machine/gcc_generic.h"
198  #elif __MINGW64__
199  #include "machine/linux_intel64.h"
200  #elif __MINGW32__
201  #include "machine/linux_ia32.h"
202  #endif
203  #elif (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT)
204  #include "machine/icc_generic.h"
205  #elif defined(_M_IX86) && !defined(__TBB_WIN32_USE_CL_BUILTINS)
206  #include "machine/windows_ia32.h"
207  #elif defined(_M_X64)
208  #include "machine/windows_intel64.h"
209  #elif defined(_M_ARM) || defined(__TBB_WIN32_USE_CL_BUILTINS)
210  #include "machine/msvc_armv7.h"
211  #endif
212 
213 #ifdef _MANAGED
214 #pragma managed(pop)
215 #endif
216 
217 #elif __TBB_DEFINE_MIC
218 
219  #include "machine/mic_common.h"
220  #if (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT)
221  #include "machine/icc_generic.h"
222  #else
223  #include "machine/linux_intel64.h"
224  #endif
225 
226 #elif __linux__ || __FreeBSD__ || __NetBSD__ || __OpenBSD__
227 
228  #ifndef TBB_USE_GCC_BUILTINS
229  #define TBB_USE_GCC_BUILTINS 1
230  #endif
231  #if (TBB_USE_GCC_BUILTINS && __TBB_GCC_BUILTIN_ATOMICS_PRESENT)
232  #include "machine/gcc_generic.h"
233  #elif (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT)
234  #include "machine/icc_generic.h"
235  #elif __i386__
236  #include "machine/linux_ia32.h"
237  #elif __x86_64__
238  #include "machine/linux_intel64.h"
239  #elif __ia64__
240  #include "machine/linux_ia64.h"
241  #elif __powerpc__
242  #include "machine/mac_ppc.h"
243  #elif __ARM_ARCH_7A__ || __aarch64__
244  #include "machine/gcc_arm.h"
245  #elif __TBB_GCC_BUILTIN_ATOMICS_PRESENT
246  #include "machine/gcc_generic.h"
247  #endif
248  #include "machine/linux_common.h"
249 
250 #elif __APPLE__
251  //TODO: TBB_USE_GCC_BUILTINS is not used for Mac, Sun, Aix
252  #if (TBB_USE_ICC_BUILTINS && __TBB_ICC_BUILTIN_ATOMICS_PRESENT)
253  #include "machine/icc_generic.h"
254  #elif __TBB_x86_32
255  #include "machine/linux_ia32.h"
256  #elif __TBB_x86_64
257  #include "machine/linux_intel64.h"
258  #elif __POWERPC__
259  #include "machine/mac_ppc.h"
260  #endif
261  #include "machine/macos_common.h"
262 
263 #elif _AIX
264 
265  #include "machine/ibm_aix51.h"
266 
267 #elif __sun || __SUNPRO_CC
268 
269  #define __asm__ asm
270  #define __volatile__ volatile
271 
272  #if __i386 || __i386__
273  #include "machine/linux_ia32.h"
274  #elif __x86_64__
275  #include "machine/linux_intel64.h"
276  #elif __sparc
277  #include "machine/sunos_sparc.h"
278  #endif
279  #include <sched.h>
280 
281  #define __TBB_Yield() sched_yield()
282 
283 #endif /* OS selection */
284 
285 #ifndef __TBB_64BIT_ATOMICS
286  #define __TBB_64BIT_ATOMICS 1
287 #endif
288 
289 //TODO: replace usage of these functions with usage of tbb::atomic, and then remove them
290 //TODO: map functions with W suffix to use cast to tbb::atomic and according op, i.e. as_atomic().op()
291 // Special atomic functions
292 #if __TBB_USE_FENCED_ATOMICS
293  #define __TBB_machine_cmpswp1 __TBB_machine_cmpswp1full_fence
294  #define __TBB_machine_cmpswp2 __TBB_machine_cmpswp2full_fence
295  #define __TBB_machine_cmpswp4 __TBB_machine_cmpswp4full_fence
296  #define __TBB_machine_cmpswp8 __TBB_machine_cmpswp8full_fence
297 
298  #if __TBB_WORDSIZE==8
299  #define __TBB_machine_fetchadd8 __TBB_machine_fetchadd8full_fence
300  #define __TBB_machine_fetchstore8 __TBB_machine_fetchstore8full_fence
301  #define __TBB_FetchAndAddWrelease(P,V) __TBB_machine_fetchadd8release(P,V)
302  #define __TBB_FetchAndIncrementWacquire(P) __TBB_machine_fetchadd8acquire(P,1)
303  #define __TBB_FetchAndDecrementWrelease(P) __TBB_machine_fetchadd8release(P,(-1))
304  #else
305  #define __TBB_machine_fetchadd4 __TBB_machine_fetchadd4full_fence
306  #define __TBB_machine_fetchstore4 __TBB_machine_fetchstore4full_fence
307  #define __TBB_FetchAndAddWrelease(P,V) __TBB_machine_fetchadd4release(P,V)
308  #define __TBB_FetchAndIncrementWacquire(P) __TBB_machine_fetchadd4acquire(P,1)
309  #define __TBB_FetchAndDecrementWrelease(P) __TBB_machine_fetchadd4release(P,(-1))
310  #endif /* __TBB_WORDSIZE==8 */
311 #else /* !__TBB_USE_FENCED_ATOMICS */
312  #define __TBB_FetchAndAddWrelease(P,V) __TBB_FetchAndAddW(P,V)
313  #define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAddW(P,1)
314  #define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAddW(P,(-1))
315 #endif /* !__TBB_USE_FENCED_ATOMICS */
316 
317 #if __TBB_WORDSIZE==4
318  #define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp4(P,V,C)
319  #define __TBB_FetchAndAddW(P,V) __TBB_machine_fetchadd4(P,V)
320  #define __TBB_FetchAndStoreW(P,V) __TBB_machine_fetchstore4(P,V)
321 #elif __TBB_WORDSIZE==8
322  #if __TBB_USE_GENERIC_DWORD_LOAD_STORE || __TBB_USE_GENERIC_DWORD_FETCH_ADD || __TBB_USE_GENERIC_DWORD_FETCH_STORE
323  #error These macros should only be used on 32-bit platforms.
324  #endif
325 
326  #define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp8(P,V,C)
327  #define __TBB_FetchAndAddW(P,V) __TBB_machine_fetchadd8(P,V)
328  #define __TBB_FetchAndStoreW(P,V) __TBB_machine_fetchstore8(P,V)
329 #else /* __TBB_WORDSIZE != 8 */
330  #error Unsupported machine word size.
331 #endif /* __TBB_WORDSIZE */
332 
333 #ifndef __TBB_Pause
334  inline void __TBB_Pause(int32_t) {
335  __TBB_Yield();
336  }
337 #endif
338 
339 namespace tbb {
340 
342 inline void atomic_fence () { __TBB_full_memory_fence(); }
343 
344 namespace internal { //< @cond INTERNAL
345 
347 
348 class atomic_backoff : no_copy {
350 
352  static const int32_t LOOPS_BEFORE_YIELD = 16;
353  int32_t count;
354 public:
355  // In many cases, an object of this type is initialized eagerly on hot path,
356  // as in for(atomic_backoff b; ; b.pause()) { /*loop body*/ }
357  // For this reason, the construction cost must be very small!
358  atomic_backoff() : count(1) {}
359  // This constructor pauses immediately; do not use on hot paths!
360  atomic_backoff( bool ) : count(1) { pause(); }
361 
363  void pause() {
364  if( count<=LOOPS_BEFORE_YIELD ) {
365  __TBB_Pause(count);
366  // Pause twice as long the next time.
367  count*=2;
368  } else {
369  // Pause is so long that we might as well yield CPU to scheduler.
370  __TBB_Yield();
371  }
372  }
373 
375  bool bounded_pause() {
376  __TBB_Pause(count);
377  if( count<LOOPS_BEFORE_YIELD ) {
378  // Pause twice as long the next time.
379  count*=2;
380  return true;
381  } else {
382  return false;
383  }
384  }
385 
386  void reset() {
387  count = 1;
388  }
389 };
390 
392 
393 template<typename T, typename U>
394 void spin_wait_while_eq( const volatile T& location, U value ) {
395  atomic_backoff backoff;
396  while( location==value ) backoff.pause();
397 }
398 
400 
401 template<typename T, typename U>
402 void spin_wait_until_eq( const volatile T& location, const U value ) {
403  atomic_backoff backoff;
404  while( location!=value ) backoff.pause();
405 }
406 
407 template <typename predicate_type>
408 void spin_wait_while(predicate_type condition){
409  atomic_backoff backoff;
410  while( condition() ) backoff.pause();
411 }
412 
414 // Generic compare-and-swap applied to only a part of a machine word.
415 //
416 #ifndef __TBB_ENDIANNESS
417 #define __TBB_ENDIANNESS __TBB_ENDIAN_DETECT
418 #endif
419 
420 #if __TBB_USE_GENERIC_PART_WORD_CAS && __TBB_ENDIANNESS==__TBB_ENDIAN_UNSUPPORTED
421 #error Generic implementation of part-word CAS may not be used with __TBB_ENDIAN_UNSUPPORTED
422 #endif
423 
424 #if __TBB_ENDIANNESS!=__TBB_ENDIAN_UNSUPPORTED
425 //
426 // This function is the only use of __TBB_ENDIANNESS.
427 // The following restrictions/limitations apply for this operation:
428 // - T must be an integer type of at most 4 bytes for the casts and calculations to work
429 // - T must also be less than 4 bytes to avoid compiler warnings when computing mask
430 // (and for the operation to be useful at all, so no workaround is applied)
431 // - the architecture must consistently use either little-endian or big-endian (same for all locations)
432 //
433 // TODO: static_assert for the type requirements stated above
434 template<typename T>
435 inline T __TBB_MaskedCompareAndSwap (volatile T * const ptr, const T value, const T comparand ) {
436  struct endianness{ static bool is_big_endian(){
437  #if __TBB_ENDIANNESS==__TBB_ENDIAN_DETECT
438  const uint32_t probe = 0x03020100;
439  return (((const char*)(&probe))[0]==0x03);
440  #elif __TBB_ENDIANNESS==__TBB_ENDIAN_BIG || __TBB_ENDIANNESS==__TBB_ENDIAN_LITTLE
441  return __TBB_ENDIANNESS==__TBB_ENDIAN_BIG;
442  #else
443  #error Unexpected value of __TBB_ENDIANNESS
444  #endif
445  }};
446 
447  const uint32_t byte_offset = (uint32_t) ((uintptr_t)ptr & 0x3);
448  volatile uint32_t * const aligned_ptr = (uint32_t*)((uintptr_t)ptr - byte_offset );
449 
450  // location of T within uint32_t for a C++ shift operation
451  const uint32_t bits_to_shift = 8*(endianness::is_big_endian() ? (4 - sizeof(T) - (byte_offset)) : byte_offset);
452  const uint32_t mask = (((uint32_t)1<<(sizeof(T)*8)) - 1 )<<bits_to_shift;
453  // for signed T, any sign extension bits in cast value/comparand are immediately clipped by mask
454  const uint32_t shifted_comparand = ((uint32_t)comparand << bits_to_shift)&mask;
455  const uint32_t shifted_value = ((uint32_t)value << bits_to_shift)&mask;
456 
457  for( atomic_backoff b;;b.pause() ) {
458  const uint32_t surroundings = *aligned_ptr & ~mask ; // may have changed during the pause
459  const uint32_t big_comparand = surroundings | shifted_comparand ;
460  const uint32_t big_value = surroundings | shifted_value ;
461  // __TBB_machine_cmpswp4 presumed to have full fence.
462  // Cast shuts up /Wp64 warning
463  const uint32_t big_result = (uint32_t)__TBB_machine_cmpswp4( aligned_ptr, big_value, big_comparand );
464  if( big_result == big_comparand // CAS succeeded
465  || ((big_result ^ big_comparand) & mask) != 0) // CAS failed and the bits of interest have changed
466  {
467  return T((big_result & mask) >> bits_to_shift);
468  }
469  else continue; // CAS failed but the bits of interest were not changed
470  }
471 }
472 #endif // __TBB_ENDIANNESS!=__TBB_ENDIAN_UNSUPPORTED
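// Worked example of the masking arithmetic above (illustrative values only):
// for a 2-byte T at byte offset 2 within its aligned 4-byte word,
//     little-endian:  bits_to_shift = 8*2 = 16,           mask = 0xFFFF0000
//     big-endian:     bits_to_shift = 8*(4 - 2 - 2) = 0,  mask = 0x0000FFFF
// In both cases the mask selects exactly the bits of the 32-bit word that hold T,
// and the surrounding bytes are preserved by merging them into comparand and value.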
473 
475 template<size_t S, typename T>
476 inline T __TBB_CompareAndSwapGeneric (volatile void *ptr, T value, T comparand );
477 
478 template<>
479 inline int8_t __TBB_CompareAndSwapGeneric <1,int8_t> (volatile void *ptr, int8_t value, int8_t comparand ) {
480 #if __TBB_USE_GENERIC_PART_WORD_CAS
481  return __TBB_MaskedCompareAndSwap<int8_t>((volatile int8_t *)ptr,value,comparand);
482 #else
483  return __TBB_machine_cmpswp1(ptr,value,comparand);
484 #endif
485 }
486 
487 template<>
488 inline int16_t __TBB_CompareAndSwapGeneric <2,int16_t> (volatile void *ptr, int16_t value, int16_t comparand ) {
489 #if __TBB_USE_GENERIC_PART_WORD_CAS
490  return __TBB_MaskedCompareAndSwap<int16_t>((volatile int16_t *)ptr,value,comparand);
491 #else
492  return __TBB_machine_cmpswp2(ptr,value,comparand);
493 #endif
494 }
495 
496 template<>
497 inline int32_t __TBB_CompareAndSwapGeneric <4,int32_t> (volatile void *ptr, int32_t value, int32_t comparand ) {
498  // Cast shuts up /Wp64 warning
499  return (int32_t)__TBB_machine_cmpswp4(ptr,value,comparand);
500 }
501 
502 #if __TBB_64BIT_ATOMICS
503 template<>
504 inline int64_t __TBB_CompareAndSwapGeneric <8,int64_t> (volatile void *ptr, int64_t value, int64_t comparand ) {
505  return __TBB_machine_cmpswp8(ptr,value,comparand);
506 }
507 #endif
508 
509 template<size_t S, typename T>
510 inline T __TBB_FetchAndAddGeneric (volatile void *ptr, T addend) {
511  T result;
512  for( atomic_backoff b;;b.pause() ) {
513  result = *reinterpret_cast<volatile T *>(ptr);
514  // __TBB_CompareAndSwapGeneric presumed to have full fence.
515  if( __TBB_CompareAndSwapGeneric<S,T> ( ptr, result+addend, result )==result )
516  break;
517  }
518  return result;
519 }
520 
521 template<size_t S, typename T>
522 inline T __TBB_FetchAndStoreGeneric (volatile void *ptr, T value) {
523  T result;
524  for( atomic_backoff b;;b.pause() ) {
525  result = *reinterpret_cast<volatile T *>(ptr);
526  // __TBB_CompareAndSwapGeneric presumed to have full fence.
527  if( __TBB_CompareAndSwapGeneric<S,T> ( ptr, value, result )==result )
528  break;
529  }
530  return result;
531 }
532 
533 #if __TBB_USE_GENERIC_PART_WORD_CAS
534 #define __TBB_machine_cmpswp1 tbb::internal::__TBB_CompareAndSwapGeneric<1,int8_t>
535 #define __TBB_machine_cmpswp2 tbb::internal::__TBB_CompareAndSwapGeneric<2,int16_t>
536 #endif
537 
538 #if __TBB_USE_GENERIC_FETCH_ADD || __TBB_USE_GENERIC_PART_WORD_FETCH_ADD
539 #define __TBB_machine_fetchadd1 tbb::internal::__TBB_FetchAndAddGeneric<1,int8_t>
540 #define __TBB_machine_fetchadd2 tbb::internal::__TBB_FetchAndAddGeneric<2,int16_t>
541 #endif
542 
543 #if __TBB_USE_GENERIC_FETCH_ADD
544 #define __TBB_machine_fetchadd4 tbb::internal::__TBB_FetchAndAddGeneric<4,int32_t>
545 #endif
546 
547 #if __TBB_USE_GENERIC_FETCH_ADD || __TBB_USE_GENERIC_DWORD_FETCH_ADD
548 #define __TBB_machine_fetchadd8 tbb::internal::__TBB_FetchAndAddGeneric<8,int64_t>
549 #endif
550 
551 #if __TBB_USE_GENERIC_FETCH_STORE || __TBB_USE_GENERIC_PART_WORD_FETCH_STORE
552 #define __TBB_machine_fetchstore1 tbb::internal::__TBB_FetchAndStoreGeneric<1,int8_t>
553 #define __TBB_machine_fetchstore2 tbb::internal::__TBB_FetchAndStoreGeneric<2,int16_t>
554 #endif
555 
556 #if __TBB_USE_GENERIC_FETCH_STORE
557 #define __TBB_machine_fetchstore4 tbb::internal::__TBB_FetchAndStoreGeneric<4,int32_t>
558 #endif
559 
560 #if __TBB_USE_GENERIC_FETCH_STORE || __TBB_USE_GENERIC_DWORD_FETCH_STORE
561 #define __TBB_machine_fetchstore8 tbb::internal::__TBB_FetchAndStoreGeneric<8,int64_t>
562 #endif
563 
564 #if __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE
565 #define __TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(S) \
566  atomic_selector<S>::word atomic_selector<S>::fetch_store ( volatile void* location, word value ) { \
567  return __TBB_machine_fetchstore##S( location, value ); \
568  }
569 
570 __TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(1)
571 __TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(2)
572 __TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(4)
573 __TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE(8)
574 
575 #undef __TBB_MACHINE_DEFINE_ATOMIC_SELECTOR_FETCH_STORE
576 #endif /* __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE */
577 
578 #if __TBB_USE_GENERIC_DWORD_LOAD_STORE
579 /*TODO: find a more elegant way to handle function names difference*/
580 #if ! __TBB_USE_FENCED_ATOMICS
581  /* This name forwarding is needed for generic implementation of
582  * load8/store8 defined below (via macro) to pick the right CAS function*/
583  #define __TBB_machine_cmpswp8full_fence __TBB_machine_cmpswp8
584 #endif
585 __TBB_MACHINE_DEFINE_LOAD8_GENERIC_FENCED(full_fence)
586 __TBB_MACHINE_DEFINE_STORE8_GENERIC_FENCED(full_fence)
587 
588 #if ! __TBB_USE_FENCED_ATOMICS
589  #undef __TBB_machine_cmpswp8full_fence
590 #endif
591 
592 #define __TBB_machine_store8 tbb::internal::__TBB_machine_generic_store8full_fence
593 #define __TBB_machine_load8 tbb::internal::__TBB_machine_generic_load8full_fence
594 #endif /* __TBB_USE_GENERIC_DWORD_LOAD_STORE */
595 
596 #if __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE
597 
607 template <typename T, size_t S>
608 struct machine_load_store {
609  static T load_with_acquire ( const volatile T& location ) {
610  T to_return = location;
611  __TBB_acquire_consistency_helper();
612  return to_return;
613  }
614  static void store_with_release ( volatile T &location, T value ) {
615  __TBB_release_consistency_helper();
616  location = value;
617  }
618 };
619 
620 // In general, a 32-bit compiler's plain loads and stores are not atomic for 64-bit types
621 #if __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS
622 template <typename T>
623 struct machine_load_store<T,8> {
624  static T load_with_acquire ( const volatile T& location ) {
625  return (T)__TBB_machine_load8( (const volatile void*)&location );
626  }
627  static void store_with_release ( volatile T& location, T value ) {
628  __TBB_machine_store8( (volatile void*)&location, (int64_t)value );
629  }
630 };
631 #endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */
632 #endif /* __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE */
633 
634 #if __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE
635 template <typename T, size_t S>
636 struct machine_load_store_seq_cst {
637  static T load ( const volatile T& location ) {
638  __TBB_full_memory_fence();
639  return machine_load_store<T,S>::load_with_acquire( location );
640  }
641 #if __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE
642  static void store ( volatile T &location, T value ) {
643  atomic_selector<S>::fetch_store( (volatile void*)&location, (typename atomic_selector<S>::word)value );
644  }
645 #else /* !__TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE */
646  static void store ( volatile T &location, T value ) {
647  machine_load_store<T,S>::store_with_release( location, value );
648  __TBB_full_memory_fence();
649  }
650 #endif /* !__TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE */
651 };
652 
653 #if __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS
654 
656 template <typename T>
657 struct machine_load_store_seq_cst<T,8> {
658  static T load ( const volatile T& location ) {
659  // Comparand and new value may be anything, they only must be equal, and
660  // the value should have a low probability to be actually found in 'location'.
661  const int64_t anyvalue = 2305843009213693951LL;
662  return __TBB_machine_cmpswp8( (volatile void*)const_cast<volatile T*>(&location), anyvalue, anyvalue );
663  }
664  static void store ( volatile T &location, T value ) {
665 #if __TBB_GCC_VERSION >= 40702
666 #pragma GCC diagnostic push
667 #pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
668 #endif
669  // An atomic initialization leads to reading of uninitialized memory
670  int64_t result = (volatile int64_t&)location;
671 #if __TBB_GCC_VERSION >= 40702
672 #pragma GCC diagnostic pop
673 #endif
674  while ( __TBB_machine_cmpswp8((volatile void*)&location, (int64_t)value, result) != result )
675  result = (volatile int64_t&)location;
676  }
677 };
678 #endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */
679 #endif /*__TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE */
680 
681 #if __TBB_USE_GENERIC_RELAXED_LOAD_STORE
682 // Relaxed operations add volatile qualifier to prevent compiler from optimizing them out.
686 template <typename T, size_t S>
687 struct machine_load_store_relaxed {
688  static inline T load ( const volatile T& location ) {
689  return location;
690  }
691  static inline void store ( volatile T& location, T value ) {
692  location = value;
693  }
694 };
695 
696 #if __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS
697 template <typename T>
698 struct machine_load_store_relaxed<T,8> {
699  static inline T load ( const volatile T& location ) {
700  return (T)__TBB_machine_load8( (const volatile void*)&location );
701  }
702  static inline void store ( volatile T& location, T value ) {
703  __TBB_machine_store8( (volatile void*)&location, (int64_t)value );
704  }
705 };
706 #endif /* __TBB_WORDSIZE==4 && __TBB_64BIT_ATOMICS */
707 #endif /* __TBB_USE_GENERIC_RELAXED_LOAD_STORE */
708 
709 #undef __TBB_WORDSIZE // this macro must not be used outside of the atomic machinery
710 
711 template<typename T>
712 inline T __TBB_load_with_acquire(const volatile T &location) {
713  return machine_load_store<T,sizeof(T)>::load_with_acquire( location );
714 }
715 template<typename T, typename V>
716 inline void __TBB_store_with_release(volatile T& location, V value) {
717  machine_load_store<T,sizeof(T)>::store_with_release( location, T(value) );
718 }
720 inline void __TBB_store_with_release(volatile size_t& location, size_t value) {
721  machine_load_store<size_t,sizeof(size_t)>::store_with_release( location, value );
722 }
723 
724 template<typename T>
725 inline T __TBB_load_full_fence(const volatile T &location) {
726  return machine_load_store_seq_cst<T,sizeof(T)>::load( location );
727 }
728 template<typename T, typename V>
729 inline void __TBB_store_full_fence(volatile T& location, V value) {
730  machine_load_store_seq_cst<T,sizeof(T)>::store( location, T(value) );
731 }
733 inline void __TBB_store_full_fence(volatile size_t& location, size_t value) {
734  machine_load_store_seq_cst<size_t,sizeof(size_t)>::store( location, value );
735 }
736 
737 template<typename T>
738 inline T __TBB_load_relaxed (const volatile T& location) {
739  return machine_load_store_relaxed<T,sizeof(T)>::load( const_cast<T&>(location) );
740 }
741 template<typename T, typename V>
742 inline void __TBB_store_relaxed ( volatile T& location, V value ) {
743  machine_load_store_relaxed<T,sizeof(T)>::store( const_cast<T&>(location), T(value) );
744 }
746 inline void __TBB_store_relaxed ( volatile size_t& location, size_t value ) {
747  machine_load_store_relaxed<size_t,sizeof(size_t)>::store( const_cast<size_t&>(location), value );
748 }
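// A sketch of how the helpers above are typically paired (hypothetical
// 'payload'/'published' variables, not part of this header):
//
//     static int payload, published;                       // both initially 0
//     // writer:
//     //     tbb::internal::__TBB_store_relaxed( payload, 42 );
//     //     tbb::internal::__TBB_store_with_release( published, 1 );
//     // reader:
//     //     if( tbb::internal::__TBB_load_with_acquire( published ) )
//     //         int v = tbb::internal::__TBB_load_relaxed( payload );  // guaranteed to observe 42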
749 
750 // Macro __TBB_TypeWithAlignmentAtLeastAsStrict(T) should be a type with alignment at least as
751 // strict as type T. The type should have a trivial default constructor and destructor, so that
752 // arrays of that type can be declared without initializers.
753 // It is correct (but perhaps a waste of space) if __TBB_TypeWithAlignmentAtLeastAsStrict(T) expands
754 // to a type bigger than T.
755 // The default definition here works on machines where integers are naturally aligned and the
756 // strictest alignment is 64.
757 #ifndef __TBB_TypeWithAlignmentAtLeastAsStrict
758 
759 #if __TBB_ALIGNAS_PRESENT
760 
761 // Use C++11 keywords alignas and alignof
762 #define __TBB_DefineTypeWithAlignment(PowerOf2) \
763 struct alignas(PowerOf2) __TBB_machine_type_with_alignment_##PowerOf2 { \
764  uint32_t member[PowerOf2/sizeof(uint32_t)]; \
765 };
766 #define __TBB_alignof(T) alignof(T)
767 
768 #elif __TBB_ATTRIBUTE_ALIGNED_PRESENT
769 
770 #define __TBB_DefineTypeWithAlignment(PowerOf2) \
771 struct __TBB_machine_type_with_alignment_##PowerOf2 { \
772  uint32_t member[PowerOf2/sizeof(uint32_t)]; \
773 } __attribute__((aligned(PowerOf2)));
774 #define __TBB_alignof(T) __alignof__(T)
775 
776 #elif __TBB_DECLSPEC_ALIGN_PRESENT
777 
778 #define __TBB_DefineTypeWithAlignment(PowerOf2) \
779 __declspec(align(PowerOf2)) \
780 struct __TBB_machine_type_with_alignment_##PowerOf2 { \
781  uint32_t member[PowerOf2/sizeof(uint32_t)]; \
782 };
783 #define __TBB_alignof(T) __alignof(T)
784 
785 #else /* A compiler with unknown syntax for data alignment */
786 #error Must define __TBB_TypeWithAlignmentAtLeastAsStrict(T)
787 #endif
788 
789 /* Now declare types aligned to useful powers of two */
790 __TBB_DefineTypeWithAlignment(8) // i386 ABI says that uint64_t is aligned on 4 bytes
791 __TBB_DefineTypeWithAlignment(16)
792 __TBB_DefineTypeWithAlignment(32)
793 __TBB_DefineTypeWithAlignment(64)
794 
795 typedef __TBB_machine_type_with_alignment_64 __TBB_machine_type_with_strictest_alignment;
796 
797 // Primary template is a declaration of incomplete type so that it fails with unknown alignments
798 template<size_t N> struct type_with_alignment;
799 
800 // Specializations for allowed alignments
801 template<> struct type_with_alignment<1> { char member; };
802 template<> struct type_with_alignment<2> { uint16_t member; };
803 template<> struct type_with_alignment<4> { uint32_t member; };
804 template<> struct type_with_alignment<8> { __TBB_machine_type_with_alignment_8 member; };
805 template<> struct type_with_alignment<16> {__TBB_machine_type_with_alignment_16 member; };
806 template<> struct type_with_alignment<32> {__TBB_machine_type_with_alignment_32 member; };
807 template<> struct type_with_alignment<64> {__TBB_machine_type_with_alignment_64 member; };
808 
809 #if __TBB_ALIGNOF_NOT_INSTANTIATED_TYPES_BROKEN
810 
813 template<size_t Size, typename T>
814 struct work_around_alignment_bug {
815  static const size_t alignment = __TBB_alignof(T);
816 };
817 #define __TBB_TypeWithAlignmentAtLeastAsStrict(T) tbb::internal::type_with_alignment<tbb::internal::work_around_alignment_bug<sizeof(T),T>::alignment>
818 #else
819 #define __TBB_TypeWithAlignmentAtLeastAsStrict(T) tbb::internal::type_with_alignment<__TBB_alignof(T)>
820 #endif /* __TBB_ALIGNOF_NOT_INSTANTIATED_TYPES_BROKEN */
821 
822 #endif /* __TBB_TypeWithAlignmentAtLeastAsStrict */
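// A sketch of the intended use of the macro above (hypothetical type 'MyType',
// not part of this header): raw storage whose alignment is at least as strict as
// MyType's, suitable for constructing the object in place.
//
//     union storage_t {
//         __TBB_TypeWithAlignmentAtLeastAsStrict(MyType) aligner;
//         char bytes[sizeof(MyType)];
//     };
//     // storage_t s;  MyType* p = new (s.bytes) MyType();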
823 
824 // Template class here is to avoid instantiation of the static data for modules that don't use it
825 template<typename T>
826 struct reverse {
827  static const T byte_table[256];
828 };
829 // An efficient implementation of the reverse function utilizes a 2^8 lookup table holding the bit-reversed
830 // values of [0..2^8 - 1]. Those values can also be computed on the fly at a slightly higher cost.
831 template<typename T>
832 const T reverse<T>::byte_table[256] = {
833  0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0,
834  0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8,
835  0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
836  0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC,
837  0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2,
838  0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
839  0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6,
840  0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE,
841  0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
842  0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9,
843  0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5,
844  0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
845  0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3,
846  0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB,
847  0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
848  0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF
849 };
850 
851 } // namespace internal @endcond
852 } // namespace tbb
853 
854 // Preserving access to legacy APIs
855 using tbb::internal::__TBB_load_with_acquire;
856 using tbb::internal::__TBB_store_with_release;
857 
858 // Mapping historically used names to the ones expected by atomic_load_store_traits
859 #define __TBB_load_acquire __TBB_load_with_acquire
860 #define __TBB_store_release __TBB_store_with_release
861 
862 #ifndef __TBB_Log2
863 inline intptr_t __TBB_Log2( uintptr_t x ) {
864  if( x==0 ) return -1;
865  intptr_t result = 0;
866 
867 #if !defined(_M_ARM)
868  uintptr_t tmp_;
869  if( sizeof(x)>4 && (tmp_ = ((uint64_t)x)>>32) ) { x=tmp_; result += 32; }
870 #endif
871  if( uintptr_t tmp = x>>16 ) { x=tmp; result += 16; }
872  if( uintptr_t tmp = x>>8 ) { x=tmp; result += 8; }
873  if( uintptr_t tmp = x>>4 ) { x=tmp; result += 4; }
874  if( uintptr_t tmp = x>>2 ) { x=tmp; result += 2; }
875 
876  return (x&2)? result+1: result;
877 }
878 #endif
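// Illustrative values for the generic definition above: __TBB_Log2(1)==0,
// __TBB_Log2(2)==1, __TBB_Log2(3)==1, __TBB_Log2(64)==6, and __TBB_Log2(0)==-1
// by convention; i.e. it returns the index of the most significant set bit (floor of log2).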
879 
880 #ifndef __TBB_AtomicOR
881 inline void __TBB_AtomicOR( volatile void *operand, uintptr_t addend ) {
882  for( tbb::internal::atomic_backoff b;;b.pause() ) {
883  uintptr_t tmp = *(volatile uintptr_t *)operand;
884  uintptr_t result = __TBB_CompareAndSwapW(operand, tmp|addend, tmp);
885  if( result==tmp ) break;
886  }
887 }
888 #endif
889 
890 #ifndef __TBB_AtomicAND
891 inline void __TBB_AtomicAND( volatile void *operand, uintptr_t addend ) {
892  for( tbb::internal::atomic_backoff b;;b.pause() ) {
893  uintptr_t tmp = *(volatile uintptr_t *)operand;
894  uintptr_t result = __TBB_CompareAndSwapW(operand, tmp&addend, tmp);
895  if( result==tmp ) break;
896  }
897 }
898 #endif
899 
900 #if __TBB_PREFETCHING
901 #ifndef __TBB_cl_prefetch
902 #error This platform does not define cache management primitives required for __TBB_PREFETCHING
903 #endif
904 
905 #ifndef __TBB_cl_evict
906 #define __TBB_cl_evict(p)
907 #endif
908 #endif
909 
910 #ifndef __TBB_Flag
911 typedef unsigned char __TBB_Flag;
912 #endif
913 typedef __TBB_atomic __TBB_Flag __TBB_atomic_flag;
914 
915 #ifndef __TBB_TryLockByte
916 inline bool __TBB_TryLockByte( __TBB_atomic_flag &flag ) {
917  return __TBB_machine_cmpswp1(&flag,1,0)==0;
918 }
919 #endif
920 
921 #ifndef __TBB_LockByte
922 inline __TBB_Flag __TBB_LockByte( __TBB_atomic_flag &flag ) {
923  tbb::internal::atomic_backoff backoff;
924  while( !__TBB_TryLockByte(flag) ) backoff.pause();
925  return 0;
926 }
927 #endif
928 
929 #ifndef __TBB_UnlockByte
930 #define __TBB_UnlockByte(addr) __TBB_store_with_release((addr),0)
931 #endif
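// A minimal critical-section sketch built from the byte-lock primitives above
// (hypothetical 'lock' flag, not part of this header):
//
//     static __TBB_atomic_flag lock;               // zero-initialized == unlocked
//     // __TBB_LockByte( lock );                   // spins with backoff until acquired
//     // ... code protected by the lock ...
//     // __TBB_UnlockByte( lock );                 // release store of 0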
932 
933 // lock primitives with Intel(R) Transactional Synchronization Extensions (Intel(R) TSX)
934 #if ( __TBB_x86_32 || __TBB_x86_64 ) /* only on ia32/intel64 */
935 inline void __TBB_TryLockByteElidedCancel() { __TBB_machine_try_lock_elided_cancel(); }
936 
937 inline bool __TBB_TryLockByteElided( __TBB_atomic_flag& flag ) {
938  bool res = __TBB_machine_try_lock_elided( &flag )!=0;
939  // to avoid the "lemming" effect, we need to abort the transaction
940  // if __TBB_machine_try_lock_elided returns false (i.e., someone else
941  // has acquired the mutex non-speculatively).
942  if( !res ) __TBB_TryLockByteElidedCancel();
943  return res;
944 }
945 
946 inline void __TBB_LockByteElided( __TBB_atomic_flag& flag )
947 {
948  for(;;) {
949  tbb::internal::spin_wait_while_eq( flag, 1 );
950  if( __TBB_machine_try_lock_elided( &flag ) )
951  return;
952  // Another thread acquired the lock "for real".
953  // To avoid the "lemming" effect, we abort the transaction.
954  __TBB_TryLockByteElidedCancel();
955  }
956 }
957 
958 inline void __TBB_UnlockByteElided( __TBB_atomic_flag& flag ) {
959  __TBB_machine_unlock_elided( &flag );
960 }
961 #endif
962 
963 #ifndef __TBB_ReverseByte
964 inline unsigned char __TBB_ReverseByte(unsigned char src) {
965  return tbb::internal::reverse<unsigned char>::byte_table[src];
966 }
967 #endif
968 
969 template<typename T>
970 T __TBB_ReverseBits(T src) {
971  T dst;
972  unsigned char *original = (unsigned char *) &src;
973  unsigned char *reversed = (unsigned char *) &dst;
974 
975  for( int i = sizeof(T)-1; i >= 0; i-- )
976  reversed[i] = __TBB_ReverseByte( original[sizeof(T)-i-1] );
977 
978  return dst;
979 }
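// For example (illustrative values): __TBB_ReverseBits<uint16_t>(0x0001) == 0x8000
// and __TBB_ReverseBits<uint16_t>(0x00F0) == 0x0F00: the bytes are swapped and the
// bits within each byte are reversed via the lookup table above.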
980 
981 #endif /* __TBB_machine_H */