summaryrefslogtreecommitdiffhomepage
path: root/src/nxt_spinlock.c
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--src/nxt_spinlock.c152
1 files changed, 152 insertions, 0 deletions
diff --git a/src/nxt_spinlock.c b/src/nxt_spinlock.c
new file mode 100644
index 00000000..0348ae23
--- /dev/null
+++ b/src/nxt_spinlock.c
@@ -0,0 +1,152 @@
+
+/*
+ * Copyright (C) Igor Sysoev
+ * Copyright (C) NGINX, Inc.
+ */
+
+#include <nxt_main.h>
+
+
+/*
+ * Linux supports pthread spinlocks since glibc 2.3. Spinlock is an
+ * atomic integer with zero initial value. On i386/amd64 however the
+ * initial value is one. Spinlock never yields control.
+ *
+ * FreeBSD 5.2 and Solaris 10 support pthread spinlocks. Spinlock is a
+ * structure and uses mutex implementation so it must be initialized
+ * by pthread_spin_init() and destroyed by pthread_spin_destroy().
+ */
+
+#if (NXT_HAVE_MACOSX_SPINLOCK)
+
+/*
+ * OSSpinLockLock() tries to acquire a lock atomically. If the lock is
+ * busy, on SMP system it tests the lock 1000 times in a tight loop with
+ * "pause" instruction. If the lock has been released, OSSpinLockLock()
+ * tries to acquire it again. On failure it goes again in the tight loop.
+ * If the lock has not been released during spinning in the loop or
+ * on UP system, OSSpinLockLock() calls thread_switch() to run 1ms
+ * with depressed (the lowest) priority.
+ */
+
/*
 * Acquires the spinlock, blocking until it is available.  Delegates
 * to the Mac OS X OSSpinLockLock() primitive; see the comment above
 * for its spin/yield strategy.  NOTE(review): OSSpinLock was
 * deprecated in macOS 10.12 in favor of os_unfair_lock — confirm
 * the supported deployment targets.
 */
void
nxt_thread_spin_lock(nxt_thread_spinlock_t *lock)
{
    nxt_thread_log_debug("OSSpinLockLock(%p) enter", lock);

    OSSpinLockLock(lock);
}
+
+
+nxt_bool_t
+nxt_thread_spin_trylock(nxt_thread_spinlock_t *lock)
+{
+ nxt_thread_log_debug("OSSpinLockTry(%p) enter", lock);
+
+ if (OSSpinLockTry(lock)) {
+ return 1;
+ }
+
+ nxt_thread_log_debug("OSSpinLockTry(%p) failed", lock);
+
+ return 0;
+}
+
+
/*
 * Releases the spinlock previously taken by nxt_thread_spin_lock()
 * or a successful nxt_thread_spin_trylock().
 */
void
nxt_thread_spin_unlock(nxt_thread_spinlock_t *lock)
{
    OSSpinLockUnlock(lock);

    nxt_thread_log_debug("OSSpinLockUnlock(%p) exit", lock);
}
+
+
+#else
+
+
/*
 * Number of busy-wait iterations performed before yielding the CPU
 * in nxt_thread_spin_lock().
 * It should be adjusted with the "spinlock_count" directive.
 */
static nxt_uint_t nxt_spinlock_count = 1000;
+
+
+void
+nxt_thread_spin_init(nxt_uint_t ncpu, nxt_uint_t count)
+{
+ switch (ncpu) {
+
+ case 0:
+ /* Explicit spinlock count. */
+ nxt_spinlock_count = count;
+ break;
+
+ case 1:
+ /* Spinning is useless on UP. */
+ nxt_spinlock_count = 0;
+ break;
+
+ default:
+ /*
+ * SMP.
+ *
+ * TODO: The count should be 10 on a virtualized system
+ * since virtualized CPUs may share the same physical CPU.
+ */
+ nxt_spinlock_count = 1000;
+ break;
+ }
+}
+
+
/*
 * Acquires the spinlock, blocking until it is available.
 *
 * Uses the test and test-and-set pattern: the atomic acquisition is
 * attempted only when a plain read of the lock shows it free, so
 * contended waiters spin on a cached value instead of hammering the
 * bus with atomic operations.  After nxt_spinlock_count fruitless
 * iterations the thread yields the processor.
 */
void
nxt_thread_spin_lock(nxt_thread_spinlock_t *lock)
{
    nxt_uint_t  n;

    nxt_thread_log_debug("spin_lock(%p) enter", lock);

    for ( ;; ) {

    again:

        /* Fast path: the lock is expected to be uncontended. */
        if (nxt_fast_path(nxt_atomic_try_lock(lock))) {
            return;
        }

        /* Busy-wait with a CPU pause hint between plain reads. */
        for (n = nxt_spinlock_count; n != 0; n--) {

            nxt_cpu_pause();

            if (*lock == 0) {
                /* The lock looks free: retry the atomic acquisition. */
                goto again;
            }
        }

        /* Spin budget exhausted: give up the rest of the time slice. */
        nxt_thread_yield();
    }
}
+
+
+nxt_bool_t
+nxt_thread_spin_trylock(nxt_thread_spinlock_t *lock)
+{
+ nxt_thread_log_debug("spin_trylock(%p) enter", lock);
+
+ if (nxt_fast_path(nxt_atomic_try_lock(lock))) {
+ return 1;
+ }
+
+ nxt_thread_log_debug("spin_trylock(%p) failed", lock);
+
+ return 0;
+}
+
+
/*
 * Releases the spinlock.  nxt_atomic_release() presumably performs a
 * release-ordered store of zero so that writes made inside the
 * critical section are visible before the lock appears free — confirm
 * against its definition in the atomics header.
 */
void
nxt_thread_spin_unlock(nxt_thread_spinlock_t *lock)
{
    nxt_atomic_release(lock);

    nxt_thread_log_debug("spin_unlock(%p) exit", lock);
}
+
+#endif