00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043
00044
00045
00046
00047
00048 #ifndef __SGI_STL_INTERNAL_THREADS_H
00049 #define __SGI_STL_INTERNAL_THREADS_H
00050
00051
00052
00053
00054
00055
00056
00057
00058
00059
00060
00061 #if defined(__STL_GTHREADS)
00062 #include "bits/gthr.h"
00063 #else
00064
00065 #if defined(__STL_SGI_THREADS)
00066 #include <mutex.h>
00067 #include <time.h>
00068 #elif defined(__STL_PTHREADS)
00069 #include <pthread.h>
00070 #elif defined(__STL_UITHREADS)
00071 #include <thread.h>
00072 #include <synch.h>
00073 #elif defined(__STL_WIN32THREADS)
00074 #include <windows.h>
00075 #endif
00076
00077 #endif
00078
00079
00080 namespace std
00081 {
00082
00083
00084
00085
00086
00087
00088
// On SGI targets where the compiler does not supply __add_and_fetch
// (MIPS I/II, or anything that is not the n32/64 ABI), fall back to the
// <mutex.h> library routines add_then_test / test_and_set.
#if defined(__STL_SGI_THREADS) && !defined(__add_and_fetch) && \
    (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
# define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)
# define __test_and_set(__l,__v) test_and_set(__l,__v)
#endif
00094
// Base class for reference-counted objects (used by the copy-on-write
// string/rope implementations).  Maintains a reference count that is
// updated atomically — either with a platform atomic primitive or under
// a per-object mutex — depending on the threading model selected by the
// __STL_*THREADS configuration macros.
struct _Refcount_Base
{
  // The type of the reference count.
# ifdef __STL_WIN32THREADS
  // Win32 InterlockedIncrement/InterlockedDecrement operate on long.
  typedef long _RC_t;
# else
  typedef size_t _RC_t;
#endif

  // The count itself.  volatile: it may be modified by several threads;
  // the actual synchronization is the lock or atomic op used below.
  volatile _RC_t _M_ref_count;

  // Constructor: set the initial count and, in mutex-based
  // configurations, initialize the mutex that guards it.
#ifdef __STL_GTHREADS
  __gthread_mutex_t _M_ref_count_lock;
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
  {
#ifdef __GTHREAD_MUTEX_INIT
    // A static initializer exists: copy it into the member.
    __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
    _M_ref_count_lock = __tmp;
#elif defined(__GTHREAD_MUTEX_INIT_FUNCTION)
    // No static initializer; use the runtime initialization function.
    __GTHREAD_MUTEX_INIT_FUNCTION (&_M_ref_count_lock);
#else
#error __GTHREAD_MUTEX_INIT or __GTHREAD_MUTEX_INIT_FUNCTION should be defined by gthr.h abstraction layer, report problem to libstdc++@gcc.gnu.org.
#endif
  }
#else

# ifdef __STL_PTHREADS
  pthread_mutex_t _M_ref_count_lock;
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
  { pthread_mutex_init(&_M_ref_count_lock, 0); }
# elif defined(__STL_UITHREADS)
  mutex_t _M_ref_count_lock;
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
  { mutex_init(&_M_ref_count_lock, USYNC_THREAD, 0); }
# else
  // Single-threaded or lock-free configurations need no mutex member.
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n) {}
# endif

#endif

  // _M_incr / _M_decr: adjust the count atomically.  _M_decr returns
  // the new value so callers can detect when it reaches zero and
  // destroy the shared representation.
#ifdef __STL_GTHREADS
  void _M_incr() {
    __gthread_mutex_lock(&_M_ref_count_lock);
    ++_M_ref_count;
    __gthread_mutex_unlock(&_M_ref_count_lock);
  }
  _RC_t _M_decr() {
    __gthread_mutex_lock(&_M_ref_count_lock);
    // Capture the new value before releasing the lock.
    volatile _RC_t __tmp = --_M_ref_count;
    __gthread_mutex_unlock(&_M_ref_count_lock);
    return __tmp;
  }
#else

  // Non-gthreads implementations, one per threading model.
# ifdef __STL_SGI_THREADS
  void _M_incr() { __add_and_fetch(&_M_ref_count, 1); }
  _RC_t _M_decr() { return __add_and_fetch(&_M_ref_count, (size_t) -1); }
# elif defined (__STL_WIN32THREADS)
  void _M_incr() { InterlockedIncrement((_RC_t*)&_M_ref_count); }
  _RC_t _M_decr() { return InterlockedDecrement((_RC_t*)&_M_ref_count); }
# elif defined(__STL_PTHREADS)
  void _M_incr() {
    pthread_mutex_lock(&_M_ref_count_lock);
    ++_M_ref_count;
    pthread_mutex_unlock(&_M_ref_count_lock);
  }
  _RC_t _M_decr() {
    pthread_mutex_lock(&_M_ref_count_lock);
    volatile _RC_t __tmp = --_M_ref_count;
    pthread_mutex_unlock(&_M_ref_count_lock);
    return __tmp;
  }
# elif defined(__STL_UITHREADS)
  void _M_incr() {
    mutex_lock(&_M_ref_count_lock);
    ++_M_ref_count;
    mutex_unlock(&_M_ref_count_lock);
  }
  _RC_t _M_decr() {
    mutex_lock(&_M_ref_count_lock);
    _RC_t __tmp = --_M_ref_count;
    mutex_unlock(&_M_ref_count_lock);
    return __tmp;
  }
# else
  // No threads: plain increment/decrement suffice.
  void _M_incr() { ++_M_ref_count; }
  _RC_t _M_decr() { return --_M_ref_count; }
# endif

#endif

};
00193
00194
00195
00196
00197
00198
// _Atomic_swap(__p, __q): atomically store __q into *__p and return the
// previous value.  Each threading model supplies its own version; where
// no hardware swap primitive is available, a single global mutex (held
// only for the duration of the swap) serializes all swaps.
#ifdef __STL_GTHREADS
// No _Atomic_swap is provided for the gthreads configuration here.
#else

# ifdef __STL_SGI_THREADS
inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
# if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
  // Pre-MIPS-III / o32: library test_and_set from <mutex.h>.
  return test_and_set(__p, __q);
# else
  // MIPS III+ n32/64: compiler intrinsic (see macro fallback above).
  return __test_and_set(__p, (unsigned long)__q);
# endif
}
# elif defined(__STL_WIN32THREADS)
inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
  return (unsigned long) InterlockedExchange((LPLONG)__p, (LONG)__q);
}
# elif defined(__STL_PTHREADS)

// Template trick so a header-only static mutex gets exactly one
// definition across translation units (only instantiation <0> is used).
template<int __dummy>
struct _Swap_lock_struct {
  static pthread_mutex_t _S_swap_lock;
};

template<int __dummy>
pthread_mutex_t
_Swap_lock_struct<__dummy>::_S_swap_lock = PTHREAD_MUTEX_INITIALIZER;

// Mutex-based emulation: not a true atomic with respect to code that
// bypasses this lock, but sufficient for the library's own callers.
inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
  pthread_mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
  unsigned long __result = *__p;
  *__p = __q;
  pthread_mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
  return __result;
}
# elif defined(__STL_UITHREADS)

// Same single-definition trick, for UNIX International threads.
template<int __dummy>
struct _Swap_lock_struct {
  static mutex_t _S_swap_lock;
};

template<int __dummy>
mutex_t
_Swap_lock_struct<__dummy>::_S_swap_lock = DEFAULTMUTEX;

// Mutex-based emulation (see pthreads version above).
inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
  mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
  unsigned long __result = *__p;
  *__p = __q;
  mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
  return __result;
}
# elif defined (__STL_SOLARIS_THREADS)

// Solaris threads variant; also handles compilers that cannot emit
// static template data members (__STL_STATIC_TEMPLATE_DATA == 0).
template<int __dummy>
struct _Swap_lock_struct {
  static mutex_t _S_swap_lock;
};

# if ( __STL_STATIC_TEMPLATE_DATA > 0 )
template<int __dummy>
mutex_t
_Swap_lock_struct<__dummy>::_S_swap_lock = DEFAULTMUTEX;
# else
__DECLARE_INSTANCE(mutex_t, _Swap_lock_struct<__dummy>::_S_swap_lock,
                   =DEFAULTMUTEX);
# endif

// Mutex-based emulation (see pthreads version above).
inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
  mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
  unsigned long __result = *__p;
  *__p = __q;
  mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
  return __result;
}
# else
// Single-threaded: no synchronization needed at all.
static inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
  unsigned long __result = *__p;
  *__p = __q;
  return __result;
}
# endif

#endif
00297
00298
00299
00300
00301
00302
00303
00304
00305
00306
00307
00308
00309
00310
00311
00312
// Adaptive spin-count state shared by the _STL_mutex_lock spin lock
// (only instantiation <0> is used; the template parameter exists so the
// static members can be defined in this header without violating the
// one-definition rule).  __max bounds how long to spin before sleeping:
// it is raised to __high_max after a lock is won by spinning and
// dropped back to __low_max once a thread had to sleep.  __last records
// the spin count of the most recent spin-acquired lock.
template <int __inst>
struct _STL_mutex_spin {
  enum { __low_max = 30, __high_max = 1000 };

  static unsigned __max;
  static unsigned __last;
};

template <int __inst>
unsigned _STL_mutex_spin<__inst>::__max = _STL_mutex_spin<__inst>::__low_max;

template <int __inst>
unsigned _STL_mutex_spin<__inst>::__last = 0;
00327
00328
// Support globals (defined in the library, not in this header) for
// lazily initializing a _STL_mutex_lock when the gthread layer offers
// only a runtime mutex-init function and no static initializer.  See
// _STL_mutex_lock::_M_initialize below for how they cooperate.
#if defined(__STL_GTHREADS)
#if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
extern __gthread_mutex_t _GLIBCPP_mutex;
extern __gthread_mutex_t *_GLIBCPP_mutex_address;
extern __gthread_once_t _GLIBCPP_once;
extern void _GLIBCPP_mutex_init (void);
extern void _GLIBCPP_mutex_address_init (void);
#endif
#endif
00338
00339
// The library's basic non-recursive lock.  Kept an aggregate (no
// constructors) so that statics of this type can be initialized with
// __STL_MUTEX_INITIALIZER; callers must invoke _M_initialize (or use
// the initializer macro) before the first _M_acquire_lock.
struct _STL_mutex_lock
{

#if defined(__STL_GTHREADS)
  // When the gthread layer has no static mutex initializer, the mutex
  // must be initialized lazily: _M_init_flag records whether that has
  // happened, and _M_once guards the one-time init of this object.
#if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
  volatile int _M_init_flag;
  __gthread_once_t _M_once;
#endif
  __gthread_mutex_t _M_lock;
  void _M_initialize() {
#ifdef __GTHREAD_MUTEX_INIT
    // Nothing to do: __STL_MUTEX_INITIALIZER already set up _M_lock.
#elif defined(__GTHREAD_MUTEX_INIT_FUNCTION)
    if (_M_init_flag) return;
    // Initialize the global handoff mutex exactly once; ignore the
    // failure unless threads are actually active.
    if (__gthread_once (&_GLIBCPP_once, _GLIBCPP_mutex_init) != 0
        && __gthread_active_p ())
      abort ();
    __gthread_mutex_lock (&_GLIBCPP_mutex);
    if (!_M_init_flag) {
      // _GLIBCPP_mutex_address_init initializes whatever mutex
      // _GLIBCPP_mutex_address points to; the global pointer handoff
      // is serialized by holding _GLIBCPP_mutex.
      _GLIBCPP_mutex_address = &_M_lock;
      if (__gthread_once (&_M_once, _GLIBCPP_mutex_address_init) != 0
          && __gthread_active_p ())
        abort ();
      _M_init_flag = 1;
    }
    __gthread_mutex_unlock (&_GLIBCPP_mutex);
#endif
  }
  void _M_acquire_lock() {
#if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
    // Lazy init on first use (statics may never have been initialized).
    if (!_M_init_flag) _M_initialize();
#endif
    __gthread_mutex_lock(&_M_lock);
  }
  void _M_release_lock() {
#if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
    if (!_M_init_flag) _M_initialize();
#endif
    __gthread_mutex_unlock(&_M_lock);
  }
#else

#if defined(__STL_SGI_THREADS) || defined(__STL_WIN32THREADS)
  // SGI / Win32: hand-rolled spin lock built on _Atomic_swap, with
  // adaptive spinning and exponential back-off to sleeping.
  // _M_lock is 1 while held, 0 when free.
  volatile unsigned long _M_lock;
  void _M_initialize() { _M_lock = 0; }
  // Sleep roughly 2**__log_nsec nanoseconds.
  static void _S_nsec_sleep(int __log_nsec) {
# ifdef __STL_SGI_THREADS
    struct timespec __ts;
    // Caller caps __log_nsec at 27, i.e. about 134 ms.
    __ts.tv_sec = 0;
    __ts.tv_nsec = 1L << __log_nsec;
    nanosleep(&__ts, 0);
# elif defined(__STL_WIN32THREADS)
    // Sleep() takes milliseconds; 2**20 nsec is about 1 ms, so below
    // that just yield the time slice.
    if (__log_nsec <= 20) {
      Sleep(0);
    } else {
      Sleep(1 << (__log_nsec - 20));
    }
# else
#   error unimplemented
# endif
  }
  void _M_acquire_lock() {
    volatile unsigned long* __lock = &this->_M_lock;

    // Fast path: uncontended swap of 0 -> 1 succeeds immediately.
    if (!_Atomic_swap((unsigned long*)__lock, 1)) {
      return;
    }
    // Spin phase: busy-wait up to the adaptive bound, re-trying the
    // swap only once the lock appears free (and after __last/2 spins,
    // to reduce coherence traffic while others are queued).
    unsigned __my_spin_max = _STL_mutex_spin<0>::__max;
    unsigned __my_last_spins = _STL_mutex_spin<0>::__last;
    volatile unsigned __junk = 17;      // Busy-work the optimizer keeps.
    unsigned __i;
    for (__i = 0; __i < __my_spin_max; __i++) {
      if (__i < __my_last_spins/2 || *__lock) {
        __junk *= __junk; __junk *= __junk;
        __junk *= __junk; __junk *= __junk;
        continue;
      }
      if (!_Atomic_swap((unsigned long*)__lock, 1)) {
        // Acquired by spinning: remember how long it took and allow
        // longer spins next time (spinning evidently pays off here).
        _STL_mutex_spin<0>::__last = __i;
        _STL_mutex_spin<0>::__max = _STL_mutex_spin<0>::__high_max;
        return;
      }
    }
    // Spinning failed: lower the spin bound and fall back to sleeping
    // with exponentially growing delay, capped at 2**27 nsec.
    _STL_mutex_spin<0>::__max = _STL_mutex_spin<0>::__low_max;
    for (__i = 0 ;; ++__i) {
      int __log_nsec = __i + 6;

      if (__log_nsec > 27) __log_nsec = 27;
      if (!_Atomic_swap((unsigned long *)__lock, 1)) {
        return;
      }
      _S_nsec_sleep(__log_nsec);
    }
  }
  void _M_release_lock() {
    volatile unsigned long* __lock = &_M_lock;
# if defined(__STL_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
    // "sync" appears intended as a memory barrier before the release
    // store (MIPS III+) -- NOTE(review): relies on the compiler not
    // reordering around plain asm.
    asm("sync");
    *__lock = 0;
# elif defined(__STL_SGI_THREADS) && __mips >= 3 \
       && (defined (_ABIN32) || defined(_ABI64))
    __lock_release(__lock);
# else
    // Plain store; assumed sufficient on the remaining targets.
    *__lock = 0;
# endif
  }

#elif defined(__STL_PTHREADS)
  pthread_mutex_t _M_lock;
  void _M_initialize() { pthread_mutex_init(&_M_lock, NULL); }
  void _M_acquire_lock() { pthread_mutex_lock(&_M_lock); }
  void _M_release_lock() { pthread_mutex_unlock(&_M_lock); }
#elif defined(__STL_UITHREADS)
  mutex_t _M_lock;
  void _M_initialize() { mutex_init(&_M_lock, USYNC_THREAD, 0); }
  void _M_acquire_lock() { mutex_lock(&_M_lock); }
  void _M_release_lock() { mutex_unlock(&_M_lock); }
#else
  // Single-threaded: locking is a no-op.
  void _M_initialize() {}
  void _M_acquire_lock() {}
  void _M_release_lock() {}
#endif

#endif

};
00483
00484
// __STL_MUTEX_INITIALIZER: aggregate initializer (including the leading
// '=') for a static _STL_mutex_lock.  Each branch must match the data
// members the corresponding configuration gives the struct above.
#if defined(__STL_GTHREADS)
#ifdef __GTHREAD_MUTEX_INIT
#define __STL_MUTEX_INITIALIZER = { __GTHREAD_MUTEX_INIT }
#elif defined(__GTHREAD_MUTEX_INIT_FUNCTION)
#ifdef __GTHREAD_MUTEX_INIT_DEFAULT
// Members: _M_init_flag, _M_once, _M_lock.
#define __STL_MUTEX_INITIALIZER \
  = { 0, __GTHREAD_ONCE_INIT, __GTHREAD_MUTEX_INIT_DEFAULT }
#else
// No default for _M_lock: it is set up lazily by _M_initialize.
#define __STL_MUTEX_INITIALIZER = { 0, __GTHREAD_ONCE_INIT }
#endif
#endif
#else

#ifdef __STL_PTHREADS
# define __STL_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
#elif defined(__STL_UITHREADS)
# define __STL_MUTEX_INITIALIZER = { DEFAULTMUTEX }
#elif defined(__STL_SGI_THREADS) || defined(__STL_WIN32THREADS)
// Spin-lock word starts out unlocked.
# define __STL_MUTEX_INITIALIZER = { 0 }
#else
// Single-threaded: the struct has no data members, so no initializer.
# define __STL_MUTEX_INITIALIZER
#endif

#endif
00513
00514
00515
00516
00517
00518
00519
00520
00521 struct _STL_auto_lock
00522 {
00523 _STL_mutex_lock& _M_lock;
00524
00525 _STL_auto_lock(_STL_mutex_lock& __lock) : _M_lock(__lock)
00526 { _M_lock._M_acquire_lock(); }
00527 ~_STL_auto_lock() { _M_lock._M_release_lock(); }
00528
00529 private:
00530 void operator=(const _STL_auto_lock&);
00531 _STL_auto_lock(const _STL_auto_lock&);
00532 };
00533
00534 }
00535
00536 #endif
00537
00538
00539
00540
00541