path: root/lib/libpthread/pthread_lock.c
author	ad <ad@NetBSD.org>	2007-08-16 13:54:16 +0000
committer	ad <ad@NetBSD.org>	2007-08-16 13:54:16 +0000
commit	d9adedd76400ff3a3069a7e9f55a78ff6aeb43e8 (patch)
tree	4e3cfb7305417e09804e79b93bf94ca7313bc87b /lib/libpthread/pthread_lock.c
parent	8423867fa7ce95339fc3981d44d13c064fd8614e (diff)
Trim fat off libpthread internal spinlock operations. Makes a measurable
improvement across the board.
Diffstat (limited to 'lib/libpthread/pthread_lock.c')
-rw-r--r--	lib/libpthread/pthread_lock.c	219
1 file changed, 95 insertions, 124 deletions
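
The change splits each spinlock operation into an inlined fast path and an
out-of-line slow path that is only entered under contention. As a rough
illustration of the resulting shape (not the NetBSD source: fictional_try_lock(),
SPIN_COUNT, and lock_enter() are hypothetical stand-ins, and GCC builtins are
assumed):

/*
 * Illustrative sketch only.  Stand-in for the fast-path/slow-path
 * split introduced by this commit.
 */
#include <sched.h>

typedef volatile int spin_t;
#define SPIN_COUNT 1000

static int
fictional_try_lock(spin_t *lock)
{
	/* Stand-in for __cpu_simple_lock_try(): atomic test-and-set. */
	return __sync_bool_compare_and_swap(lock, 0, 1);
}

/*
 * Out-of-line slow path, kept out of the caller's instruction stream
 * so the uncontended case stays small.
 */
__attribute__((noinline)) static void
lock_slow(spin_t *lock)
{
	int count;

	for (;;) {
		/* Spin a bounded number of times before yielding. */
		count = SPIN_COUNT;
		while (*lock != 0 && --count > 0)
			continue;
		if (fictional_try_lock(lock))
			return;
		sched_yield();
	}
}

/* Inlined fast path: one try, then fall back under contention. */
static inline void
lock_enter(spin_t *lock)
{
	if (__builtin_expect(!fictional_try_lock(lock), 0))
		lock_slow(lock);
}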
diff --git a/lib/libpthread/pthread_lock.c b/lib/libpthread/pthread_lock.c
index f39c68f4a0c..c5f8d027881 100644
--- a/lib/libpthread/pthread_lock.c
+++ b/lib/libpthread/pthread_lock.c
@@ -1,4 +1,4 @@
-/* $NetBSD: pthread_lock.c,v 1.22 2007/08/16 12:01:49 ad Exp $ */
+/* $NetBSD: pthread_lock.c,v 1.23 2007/08/16 13:54:17 ad Exp $ */
/*-
* Copyright (c) 2001, 2006, 2007 The NetBSD Foundation, Inc.
@@ -36,8 +36,12 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+/*
+ * libpthread internal spinlock routines.
+ */
+
#include <sys/cdefs.h>
-__RCSID("$NetBSD: pthread_lock.c,v 1.22 2007/08/16 12:01:49 ad Exp $");
+__RCSID("$NetBSD: pthread_lock.c,v 1.23 2007/08/16 13:54:17 ad Exp $");
#include <sys/types.h>
#include <sys/lock.h>
@@ -60,9 +64,12 @@ __RCSID("$NetBSD: pthread_lock.c,v 1.22 2007/08/16 12:01:49 ad Exp $");
#define SDPRINTF(x)
#endif
+static void pthread_spinlock_slow(pthread_spin_t *);
+
static int pthread__atomic;
RAS_DECL(pthread__lock);
+RAS_DECL(pthread__lock2);
void
pthread__simple_lock_init(__cpu_simple_lock_t *alp)
@@ -96,67 +103,66 @@ inline void
pthread__simple_unlock(__cpu_simple_lock_t *alp)
{
+#ifdef PTHREAD__CHEAP_UNLOCK
+ __cpu_simple_unlock(alp);
+#else
if (pthread__atomic) {
__cpu_simple_unlock(alp);
return;
}
-
*alp = __SIMPLELOCK_UNLOCKED;
+#endif
}
-/*
- * Initialize the locking primitives. On uniprocessors, we always
- * use Restartable Atomic Sequences if they are available. Otherwise,
- * we fall back onto machine-dependent atomic lock primitives.
- */
void
-pthread__lockprim_init(void)
+pthread_spinlock(pthread_spin_t *lock)
{
- char *p;
-
- if ((p = getenv("PTHREAD_NSPINS")) != NULL)
- pthread__nspins = atoi(p);
- else if (pthread__concurrency != 1)
- pthread__nspins = PTHREAD__NSPINS;
- else
- pthread__nspins = 1;
+#ifdef PTHREAD_SPIN_DEBUG
+ pthread_t thread = pthread__self();
- if (pthread__concurrency != 1) {
- pthread__atomic = 1;
- return;
- }
+ SDPRINTF(("(pthread_spinlock %p) spinlock %p (count %d)\n",
+ thread, lock, thread->pt_spinlocks));
+ pthread__assert(thread->pt_spinlocks >= 0);
+ thread->pt_spinlocks++;
+#endif
- if (rasctl(RAS_ADDR(pthread__lock), RAS_SIZE(pthread__lock),
- RAS_INSTALL) != 0) {
- pthread__atomic = 1;
- return;
- }
-}
+ if (pthread__atomic) {
+ if (__predict_false(!__cpu_simple_lock_try(lock))) {
+ pthread_spinlock_slow(lock);
+ return;
+ }
+ } else {
+ __cpu_simple_lock_t old;
-void
-pthread_lockinit(pthread_spin_t *lock)
-{
+ RAS_START(pthread__lock2);
+ old = *lock;
+ *lock = __SIMPLELOCK_LOCKED;
+ RAS_END(pthread__lock2);
- pthread__simple_lock_init(lock);
+ if (__predict_false(old != __SIMPLELOCK_UNLOCKED)) {
+ pthread_spinlock_slow(lock);
+ return;
+ }
+ }
+
+ PTHREADD_ADD(PTHREADD_SPINLOCKS);
}
-void
-pthread_spinlock(pthread_t thread, pthread_spin_t *lock)
+/*
+ * Prevent this routine from being inlined. The common case is no
+ * contention and it's better to not burden the instruction decoder.
+ */
+#if __GNUC_PREREQ__(3, 0)
+__attribute ((noinline))
+#endif
+static void
+pthread_spinlock_slow(pthread_spin_t *lock)
{
int count;
-
- SDPRINTF(("(pthread_spinlock %p) spinlock %p (count %d)\n",
- thread, lock, thread->pt_spinlocks));
#ifdef PTHREAD_SPIN_DEBUG
- pthread__assert(thread->pt_spinlocks >= 0);
+ pthread_t thread = pthread__self();
#endif
- thread->pt_spinlocks++;
- if (__predict_true(pthread__simple_lock_try(lock))) {
- PTHREADD_ADD(PTHREADD_SPINLOCKS);
- return;
- }
-
do {
count = pthread__nspins;
while (*lock == __SIMPLELOCK_LOCKED && --count > 0)
@@ -167,133 +173,98 @@ pthread_spinlock(pthread_t thread, pthread_spin_t *lock)
continue;
}
+#ifdef PTHREAD_SPIN_DEBUG
SDPRINTF(("(pthread_spinlock %p) retrying spinlock %p "
"(count %d)\n", thread, lock,
thread->pt_spinlocks));
thread->pt_spinlocks--;
-
/* XXXLWP far from ideal */
sched_yield();
thread->pt_spinlocks++;
+#else
+ /* XXXLWP far from ideal */
+ sched_yield();
+#endif
} while (/*CONSTCOND*/ 1);
PTHREADD_ADD(PTHREADD_SPINLOCKS);
}
int
-pthread_spintrylock(pthread_t thread, pthread_spin_t *lock)
+pthread_spintrylock(pthread_spin_t *lock)
{
+#ifdef PTHREAD_SPIN_DEBUG
+ pthread_t thread = pthread__self();
int ret;
SDPRINTF(("(pthread_spintrylock %p) spinlock %p (count %d)\n",
thread, lock, thread->pt_spinlocks));
-
thread->pt_spinlocks++;
ret = pthread__simple_lock_try(lock);
if (!ret)
thread->pt_spinlocks--;
-
return ret;
+#else
+ return pthread__simple_lock_try(lock);
+#endif
}
void
-pthread_spinunlock(pthread_t thread, pthread_spin_t *lock)
+pthread_spinunlock(pthread_spin_t *lock)
{
+#ifdef PTHREAD_SPIN_DEBUG
+ pthread_t thread = pthread__self();
SDPRINTF(("(pthread_spinunlock %p) spinlock %p (count %d)\n",
thread, lock, thread->pt_spinlocks));
pthread__simple_unlock(lock);
thread->pt_spinlocks--;
-#ifdef PTHREAD_SPIN_DEBUG
pthread__assert(thread->pt_spinlocks >= 0);
-#endif
PTHREADD_ADD(PTHREADD_SPINUNLOCKS);
-}
-
-
-/*
- * Public (POSIX-specified) spinlocks.
- */
-int
-pthread_spin_init(pthread_spinlock_t *lock, int pshared)
-{
-
-#ifdef ERRORCHECK
- if (lock == NULL || (pshared != PTHREAD_PROCESS_PRIVATE &&
- pshared != PTHREAD_PROCESS_SHARED))
- return EINVAL;
-#endif
- lock->pts_magic = _PT_SPINLOCK_MAGIC;
-
- /*
- * We don't actually use the pshared flag for anything;
- * CPU simple locks have all the process-shared properties
- * that we want anyway.
- */
- lock->pts_flags = pshared;
- pthread_lockinit(&lock->pts_spin);
-
- return 0;
-}
-
-int
-pthread_spin_destroy(pthread_spinlock_t *lock)
-{
-
-#ifdef ERRORCHECK
- if (lock == NULL || lock->pts_magic != _PT_SPINLOCK_MAGIC)
- return EINVAL;
- if (lock->pts_spin != __SIMPLELOCK_UNLOCKED)
- return EBUSY;
+#else
+ pthread__simple_unlock(lock);
#endif
-
- lock->pts_magic = _PT_SPINLOCK_DEAD;
-
- return 0;
}
-int
-pthread_spin_lock(pthread_spinlock_t *lock)
+/*
+ * Initialize the locking primitives. On uniprocessors, we always
+ * use Restartable Atomic Sequences if they are available. Otherwise,
+ * we fall back onto machine-dependent atomic lock primitives.
+ */
+void
+pthread__lockprim_init(void)
{
+ char *p;
-#ifdef ERRORCHECK
- if (lock == NULL || lock->pts_magic != _PT_SPINLOCK_MAGIC)
- return EINVAL;
-#endif
+ if ((p = getenv("PTHREAD_NSPINS")) != NULL)
+ pthread__nspins = atoi(p);
+ else if (pthread__concurrency != 1)
+ pthread__nspins = PTHREAD__NSPINS;
+ else
+ pthread__nspins = 1;
- while (pthread__simple_lock_try(&lock->pts_spin) == 0) {
- pthread__smt_pause();
+ if (pthread__concurrency != 1) {
+ pthread__atomic = 1;
+ return;
}
- return 0;
-}
-
-int
-pthread_spin_trylock(pthread_spinlock_t *lock)
-{
-
-#ifdef ERRORCHECK
- if (lock == NULL || lock->pts_magic != _PT_SPINLOCK_MAGIC)
- return EINVAL;
-#endif
-
- if (pthread__simple_lock_try(&lock->pts_spin) == 0)
- return EBUSY;
+ if (rasctl(RAS_ADDR(pthread__lock), RAS_SIZE(pthread__lock),
+ RAS_INSTALL) != 0) {
+ pthread__atomic = 1;
+ return;
+ }
- return 0;
+ if (rasctl(RAS_ADDR(pthread__lock2), RAS_SIZE(pthread__lock2),
+ RAS_INSTALL) != 0) {
+ pthread__atomic = 1;
+ return;
+ }
}
-int
-pthread_spin_unlock(pthread_spinlock_t *lock)
+void
+pthread_lockinit(pthread_spin_t *lock)
{
-#ifdef ERRORCHECK
- if (lock == NULL || lock->pts_magic != _PT_SPINLOCK_MAGIC)
- return EINVAL;
-#endif
-
- pthread__simple_unlock(&lock->pts_spin);
-
- return 0;
+ pthread__simple_lock_init(lock);
}