// Copyright (c) 2005, 2006, Google Inc.
// Copyright (c) 2010, Patrick Gansterer <paroga@paroga.com>
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// ---
// Author: Sanjay Ghemawat <opensource@google.com>

#ifndef TCMALLOC_INTERNAL_SPINLOCK_H__
#define TCMALLOC_INTERNAL_SPINLOCK_H__

#if (CPU(X86) || CPU(X86_64) || CPU(PPC)) && (COMPILER(GCC) || COMPILER(MSVC))

#include <time.h> /* For nanosleep() */

#if HAVE(STDINT_H)
#include <stdint.h>
#elif HAVE(INTTYPES_H)
#include <inttypes.h>
#else
#include <sys/types.h>
#endif

#if OS(WINDOWS)
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#else
#include <sched.h> /* For sched_yield() */
#endif

static void TCMalloc_SlowLock(volatile unsigned int* lockword);

// The following is a struct so that it can be initialized at compile time
struct TCMalloc_SpinLock {

    inline void Lock() {
        int r;
#if COMPILER(GCC)
#if CPU(X86) || CPU(X86_64)
        // Atomically exchange lockword_ with 1; r receives the old value.
        __asm__ __volatile__
            ("xchgl %0, %1"
             : "=r"(r), "=m"(lockword_)
             : "0"(1), "m"(lockword_)
             : "memory");
#else
        // PPC: load-reserved/store-conditional loop, followed by an isync barrier.
        volatile unsigned int* lockword_ptr = &lockword_;
        __asm__ __volatile__
            ("1: lwarx %0, 0, %1\n\t"
             "stwcx. %2, 0, %1\n\t"
             "bne- 1b\n\t"
             "isync"
             : "=&r" (r), "=r" (lockword_ptr)
             : "r" (1), "1" (lockword_ptr)
             : "memory");
#endif
#elif COMPILER(MSVC)
        __asm {
            mov eax, this    ; store &lockword_ (which is this+0) in eax
            mov ebx, 1       ; store 1 in ebx
            xchg [eax], ebx  ; exchange lockword_ and 1
            mov r, ebx       ; store old value of lockword_ in r
        }
#endif
        if (r) TCMalloc_SlowLock(&lockword_);
    }
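
    // For reference, the assembly above implements a test-and-set: a sketch
    // of the equivalent logic, assuming a generic atomic-exchange primitive
    // (not part of this file), would be:
    //
    //     if (atomic_exchange(&lockword_, 1) != 0) // lock was already held
    //         TCMalloc_SlowLock(&lockword_);       // contended: yield/sleep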

    inline void Unlock() {
#if COMPILER(GCC)
#if CPU(X86) || CPU(X86_64)
        // A plain store of 0 is sufficient to release the lock on x86.
        __asm__ __volatile__
            ("movl $0, %0"
             : "=m"(lockword_)
             : "m"(lockword_)
             : "memory");
#else
        // PPC: order prior accesses before publishing the zero.
        __asm__ __volatile__
            ("isync\n\t"
             "eieio\n\t"
             "stw %1, %0"
#if OS(DARWIN) || CPU(PPC)
             : "=o" (lockword_)
#else
             : "=m" (lockword_)
#endif
             : "r" (0)
             : "memory");
#endif
#elif COMPILER(MSVC)
        __asm {
            mov eax, this  ; store &lockword_ (which is this+0) in eax
            mov [eax], 0   ; set lockword_ to 0
        }
#endif
    }

    // Report if we think the lock can be held by this thread.
    // When the lock is truly held by the invoking thread
    // we will always return true.
    // Intended to be used as CHECK(lock.IsHeld());
    inline bool IsHeld() const {
        return lockword_ != 0;
    }

    inline void Init() { lockword_ = 0; }

    volatile unsigned int lockword_;
};

#define SPINLOCK_INITIALIZER { 0 }
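
// Illustrative use of SPINLOCK_INITIALIZER (the variable name below is
// hypothetical): because TCMalloc_SpinLock is a plain aggregate, a static
// lock needs no constructor and is usable before static initializers run:
//
//     static TCMalloc_SpinLock exampleLock = SPINLOCK_INITIALIZER;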

static void TCMalloc_SlowLock(volatile unsigned int* lockword) {
    // Yield immediately since fast path failed
#if OS(WINDOWS)
    Sleep(0);
#else
    sched_yield();
#endif
    while (true) {
        int r;
#if COMPILER(GCC)
#if CPU(X86) || CPU(X86_64)
        __asm__ __volatile__
            ("xchgl %0, %1"
             : "=r"(r), "=m"(*lockword)
             : "0"(1), "m"(*lockword)
             : "memory");
#else
        int tmp = 1;
        __asm__ __volatile__
            ("1: lwarx %0, 0, %1\n\t"
             "stwcx. %2, 0, %1\n\t"
             "bne- 1b\n\t"
             "isync"
             : "=&r" (r), "=r" (lockword)
             : "r" (tmp), "1" (lockword)
             : "memory");
#endif
#elif COMPILER(MSVC)
        __asm {
            mov eax, lockword ; assign lockword into eax
            mov ebx, 1        ; assign 1 into ebx
            xchg [eax], ebx   ; exchange *lockword and 1
            mov r, ebx        ; store old value of *lockword in r
        }
#endif
        if (!r)
            return;

        // This code was adapted from the ptmalloc2 implementation of
        // spinlocks, which would sched_yield() up to 50 times before
        // sleeping once for a few milliseconds. Mike Burrows suggested
        // just doing one sched_yield() outside the loop and always
        // sleeping after that. This change helped a great deal on the
        // performance of spinlocks under high contention. A test program
        // with 10 threads on a dual Xeon (four virtual processors) went
        // from taking 30 seconds to 16 seconds.

        // Sleep for a few milliseconds
#if OS(WINDOWS)
        Sleep(2);
#else
        struct timespec tm;
        tm.tv_sec = 0;
        tm.tv_nsec = 2000001;
        nanosleep(&tm, NULL);
#endif
    }
}

#elif OS(WINDOWS)

#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>

static void TCMalloc_SlowLock(LPLONG lockword);

// The following is a struct so that it can be initialized at compile time
struct TCMalloc_SpinLock {

    inline void Lock() {
        if (InterlockedExchange(&m_lockword, 1))
            TCMalloc_SlowLock(&m_lockword);
    }

    inline void Unlock() {
        InterlockedExchange(&m_lockword, 0);
    }

    inline bool IsHeld() const {
        return m_lockword != 0;
    }

    inline void Init() { m_lockword = 0; }

    LONG m_lockword;
};

#define SPINLOCK_INITIALIZER { 0 }

static void TCMalloc_SlowLock(LPLONG lockword) {
    Sleep(0); // Yield immediately since fast path failed
    while (InterlockedExchange(lockword, 1))
        Sleep(2);
}

#else

#include <pthread.h>

// Portable version
struct TCMalloc_SpinLock {
    pthread_mutex_t private_lock_;

    inline void Init() {
        if (pthread_mutex_init(&private_lock_, NULL) != 0) CRASH();
    }
    inline void Finalize() {
        if (pthread_mutex_destroy(&private_lock_) != 0) CRASH();
    }
    inline void Lock() {
        if (pthread_mutex_lock(&private_lock_) != 0) CRASH();
    }
    inline void Unlock() {
        if (pthread_mutex_unlock(&private_lock_) != 0) CRASH();
    }
    inline bool IsHeld() {
        if (pthread_mutex_trylock(&private_lock_))
            return true;

        Unlock();
        return false;
    }
};

#define SPINLOCK_INITIALIZER { PTHREAD_MUTEX_INITIALIZER }

#endif

// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
class TCMalloc_SpinLockHolder {
private:
    TCMalloc_SpinLock* lock_;

public:
    inline explicit TCMalloc_SpinLockHolder(TCMalloc_SpinLock* l)
        : lock_(l) { l->Lock(); }
    inline ~TCMalloc_SpinLockHolder() { lock_->Unlock(); }
};

// Short-hands for convenient use by tcmalloc.cc
typedef TCMalloc_SpinLock SpinLock;
typedef TCMalloc_SpinLockHolder SpinLockHolder;
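
// Illustrative scoped-locking pattern (the lock and function names below are
// hypothetical): the holder acquires the lock on construction and releases
// it when the scope exits, even on an early return:
//
//     static SpinLock exampleLock = SPINLOCK_INITIALIZER;
//
//     void exampleCriticalSection() {
//         SpinLockHolder holder(&exampleLock);
//         // ... work guarded by exampleLock ...
//     }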

#endif // TCMALLOC_INTERNAL_SPINLOCK_H__