+2005-07-18 Bruno Haible <bruno@clisp.org>
+
+ * modules/lock: New file.
+ * MODULES.html.sh (Multithreading): New section.
+
2005-07-15 Paul Eggert <eggert@cs.ucla.edu>
* modules/xalloc (Depends-on): Add xalloc-die.
func_module canon-host
func_end_table
+ element="Multithreading"
+ element=`printf "%s" "$element" | sed -e "$sed_lt" -e "$sed_gt"`
+ func_section_wrap posix_ext_thread
+ func_wrap H3
+ func_echo "$element"
+
+ func_begin_table
+ func_module lock
+ func_end_table
+
element="Internationalization functions"
element=`printf "%s" "$element" | sed -e "$sed_lt" -e "$sed_gt"`
func_section_wrap posix_ext_i18n
+2005-07-18 Bruno Haible <bruno@clisp.org>
+
+ * lock.h: New file, from GNU gettext.
+ * lock.c: New file, from GNU gettext.
+
2005-07-11 Paul Eggert <eggert@cs.ucla.edu>
* version-etc-fsf.c (version_etc_copyright): Parameterize the
--- /dev/null
+/* Locking in multithreaded situations.
+ Copyright (C) 2005 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU Library General Public License as published
+ by the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ USA. */
+
+/* Written by Bruno Haible <bruno@clisp.org>, 2005.
+ Based on GCC's gthr-posix.h, gthr-posix95.h, gthr-solaris.h,
+ gthr-win32.h. */
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include "lock.h"
+
+/* ========================================================================= */
+
+#if USE_POSIX_THREADS
+
+/* Use the POSIX threads library. */
+
+/* -------------------------- gl_lock_t datatype -------------------------- */
+
+/* ------------------------- gl_rwlock_t datatype ------------------------- */
+
+# if HAVE_PTHREAD_RWLOCK
+
+# if !defined PTHREAD_RWLOCK_INITIALIZER
+
+void
+glthread_rwlock_init (gl_rwlock_t *lock)
+{
+ if (pthread_rwlock_init (&lock->rwlock, NULL) != 0)
+ abort ();
+ lock->initialized = 1;
+}
+
+void
+glthread_rwlock_rdlock (gl_rwlock_t *lock)
+{
+ if (!lock->initialized)
+ {
+ if (pthread_mutex_lock (&lock->guard) != 0)
+ abort ();
+ if (!lock->initialized)
+ glthread_rwlock_init (lock);
+ if (pthread_mutex_unlock (&lock->guard) != 0)
+ abort ();
+ }
+ if (pthread_rwlock_rdlock (&lock->rwlock) != 0)
+ abort ();
+}
+
+void
+glthread_rwlock_wrlock (gl_rwlock_t *lock)
+{
+ if (!lock->initialized)
+ {
+ if (pthread_mutex_lock (&lock->guard) != 0)
+ abort ();
+ if (!lock->initialized)
+ glthread_rwlock_init (lock);
+ if (pthread_mutex_unlock (&lock->guard) != 0)
+ abort ();
+ }
+ if (pthread_rwlock_wrlock (&lock->rwlock) != 0)
+ abort ();
+}
+
+void
+glthread_rwlock_unlock (gl_rwlock_t *lock)
+{
+ if (!lock->initialized)
+ abort ();
+ if (pthread_rwlock_unlock (&lock->rwlock) != 0)
+ abort ();
+}
+
+void
+glthread_rwlock_destroy (gl_rwlock_t *lock)
+{
+ if (!lock->initialized)
+ abort ();
+ if (pthread_rwlock_destroy (&lock->rwlock) != 0)
+ abort ();
+ lock->initialized = 0;
+}
+
+# endif
+
+# else
+
+void
+glthread_rwlock_init (gl_rwlock_t *lock)
+{
+ if (pthread_mutex_init (&lock->lock, NULL) != 0)
+ abort ();
+ if (pthread_cond_init (&lock->waiting_readers, NULL) != 0)
+ abort ();
+ if (pthread_cond_init (&lock->waiting_writers, NULL) != 0)
+ abort ();
+ lock->waiting_writers_count = 0;
+ lock->runcount = 0;
+}
+
+void
+glthread_rwlock_rdlock (gl_rwlock_t *lock)
+{
+ if (pthread_mutex_lock (&lock->lock) != 0)
+ abort ();
+ /* Test whether only readers are currently running, and whether the runcount
+ field will not overflow. */
+ /* POSIX says: "It is implementation-defined whether the calling thread
+ acquires the lock when a writer does not hold the lock and there are
+ writers blocked on the lock." Let's say, no: give the writers a higher
+ priority. */
+ while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
+ {
+ /* This thread has to wait for a while. Enqueue it among the
+ waiting_readers. */
+ if (pthread_cond_wait (&lock->waiting_readers, &lock->lock) != 0)
+ abort ();
+ }
+ lock->runcount++;
+ if (pthread_mutex_unlock (&lock->lock) != 0)
+ abort ();
+}
+
+void
+glthread_rwlock_wrlock (gl_rwlock_t *lock)
+{
+ if (pthread_mutex_lock (&lock->lock) != 0)
+ abort ();
+ /* Test whether no readers or writers are currently running. */
+ while (!(lock->runcount == 0))
+ {
+ /* This thread has to wait for a while. Enqueue it among the
+ waiting_writers. */
+ lock->waiting_writers_count++;
+ if (pthread_cond_wait (&lock->waiting_writers, &lock->lock) != 0)
+ abort ();
+ lock->waiting_writers_count--;
+ }
+ lock->runcount--; /* runcount becomes -1 */
+ if (pthread_mutex_unlock (&lock->lock) != 0)
+ abort ();
+}
+
+void
+glthread_rwlock_unlock (gl_rwlock_t *lock)
+{
+ if (pthread_mutex_lock (&lock->lock) != 0)
+ abort ();
+ if (lock->runcount < 0)
+ {
+ /* Drop a writer lock. */
+ if (!(lock->runcount == -1))
+ abort ();
+ lock->runcount = 0;
+ }
+ else
+ {
+ /* Drop a reader lock. */
+ if (!(lock->runcount > 0))
+ abort ();
+ lock->runcount--;
+ }
+ if (lock->runcount == 0)
+ {
+ /* POSIX recommends that "write locks shall take precedence over read
+ locks", to avoid "writer starvation". */
+ if (lock->waiting_writers_count > 0)
+ {
+ /* Wake up one of the waiting writers. */
+ if (pthread_cond_signal (&lock->waiting_writers) != 0)
+ abort ();
+ }
+ else
+ {
+ /* Wake up all waiting readers. */
+ if (pthread_cond_broadcast (&lock->waiting_readers) != 0)
+ abort ();
+ }
+ }
+ if (pthread_mutex_unlock (&lock->lock) != 0)
+ abort ();
+}
+
+void
+glthread_rwlock_destroy (gl_rwlock_t *lock)
+{
+ if (pthread_mutex_destroy (&lock->lock) != 0)
+ abort ();
+ if (pthread_cond_destroy (&lock->waiting_readers) != 0)
+ abort ();
+ if (pthread_cond_destroy (&lock->waiting_writers) != 0)
+ abort ();
+}
+
+# endif
+
+/* --------------------- gl_recursive_lock_t datatype --------------------- */
+
+# if HAVE_PTHREAD_MUTEX_RECURSIVE
+
+# if !(defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER || defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP)
+
+void
+glthread_recursive_lock_init (gl_recursive_lock_t *lock)
+{
+ pthread_mutexattr_t attributes;
+
+ if (pthread_mutexattr_init (&attributes) != 0)
+ abort ();
+ if (pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE) != 0)
+ abort ();
+ if (pthread_mutex_init (&lock->recmutex, &attributes) != 0)
+ abort ();
+ if (pthread_mutexattr_destroy (&attributes) != 0)
+ abort ();
+ lock->initialized = 1;
+}
+
+void
+glthread_recursive_lock_lock (gl_recursive_lock_t *lock)
+{
+ if (!lock->initialized)
+ {
+ if (pthread_mutex_lock (&lock->guard) != 0)
+ abort ();
+ if (!lock->initialized)
+ glthread_recursive_lock_init (lock);
+ if (pthread_mutex_unlock (&lock->guard) != 0)
+ abort ();
+ }
+ if (pthread_mutex_lock (&lock->recmutex) != 0)
+ abort ();
+}
+
+void
+glthread_recursive_lock_unlock (gl_recursive_lock_t *lock)
+{
+ if (!lock->initialized)
+ abort ();
+ if (pthread_mutex_unlock (&lock->recmutex) != 0)
+ abort ();
+}
+
+void
+glthread_recursive_lock_destroy (gl_recursive_lock_t *lock)
+{
+ if (!lock->initialized)
+ abort ();
+ if (pthread_mutex_destroy (&lock->recmutex) != 0)
+ abort ();
+ lock->initialized = 0;
+}
+
+# endif
+
+# else
+
+void
+glthread_recursive_lock_init (gl_recursive_lock_t *lock)
+{
+ if (pthread_mutex_init (&lock->mutex, NULL) != 0)
+ abort ();
+ lock->owner = (pthread_t) 0;
+ lock->depth = 0;
+}
+
+void
+glthread_recursive_lock_lock (gl_recursive_lock_t *lock)
+{
+ pthread_t self = pthread_self ();
+ if (lock->owner != self)
+ {
+ if (pthread_mutex_lock (&lock->mutex) != 0)
+ abort ();
+ lock->owner = self;
+ }
+ if (++(lock->depth) == 0) /* wraparound? */
+ abort ();
+}
+
+void
+glthread_recursive_lock_unlock (gl_recursive_lock_t *lock)
+{
+ if (lock->owner != pthread_self ())
+ abort ();
+ if (lock->depth == 0)
+ abort ();
+ if (--(lock->depth) == 0)
+ {
+ lock->owner = (pthread_t) 0;
+ if (pthread_mutex_unlock (&lock->mutex) != 0)
+ abort ();
+ }
+}
+
+void
+glthread_recursive_lock_destroy (gl_recursive_lock_t *lock)
+{
+ if (lock->owner != (pthread_t) 0)
+ abort ();
+ if (pthread_mutex_destroy (&lock->mutex) != 0)
+ abort ();
+}
+
+# endif
+
+#endif
+
+/* ========================================================================= */
+
+#if USE_PTH_THREADS
+
+/* Use the GNU Pth threads library. */
+
+/* -------------------------- gl_lock_t datatype -------------------------- */
+
+/* ------------------------- gl_rwlock_t datatype ------------------------- */
+
+/* --------------------- gl_recursive_lock_t datatype --------------------- */
+
+#endif
+
+/* ========================================================================= */
+
+#if USE_SOLARIS_THREADS
+
+/* Use the old Solaris threads library. */
+
+/* -------------------------- gl_lock_t datatype -------------------------- */
+
+/* ------------------------- gl_rwlock_t datatype ------------------------- */
+
+/* --------------------- gl_recursive_lock_t datatype --------------------- */
+
+void
+glthread_recursive_lock_init (gl_recursive_lock_t *lock)
+{
+ if (mutex_init (&lock->mutex, USYNC_THREAD, NULL) != 0)
+ abort ();
+ lock->owner = (thread_t) 0;
+ lock->depth = 0;
+}
+
+void
+glthread_recursive_lock_lock (gl_recursive_lock_t *lock)
+{
+ thread_t self = thr_self ();
+ if (lock->owner != self)
+ {
+ if (mutex_lock (&lock->mutex) != 0)
+ abort ();
+ lock->owner = self;
+ }
+ if (++(lock->depth) == 0) /* wraparound? */
+ abort ();
+}
+
+void
+glthread_recursive_lock_unlock (gl_recursive_lock_t *lock)
+{
+ if (lock->owner != thr_self ())
+ abort ();
+ if (lock->depth == 0)
+ abort ();
+ if (--(lock->depth) == 0)
+ {
+ lock->owner = (thread_t) 0;
+ if (mutex_unlock (&lock->mutex) != 0)
+ abort ();
+ }
+}
+
+void
+glthread_recursive_lock_destroy (gl_recursive_lock_t *lock)
+{
+ if (lock->owner != (thread_t) 0)
+ abort ();
+ if (mutex_destroy (&lock->mutex) != 0)
+ abort ();
+}
+
+#endif
+
+/* ========================================================================= */
+
+#if USE_WIN32_THREADS
+
+/* -------------------------- gl_lock_t datatype -------------------------- */
+
+void
+glthread_lock_init (gl_lock_t *lock)
+{
+ InitializeCriticalSection (&lock->lock);
+ lock->guard.done = 1;
+}
+
+void
+glthread_lock_lock (gl_lock_t *lock)
+{
+ if (!lock->guard.done)
+ {
+ if (InterlockedIncrement (&lock->guard.started) == 0)
+ /* This thread is the first one to need this lock. Initialize it. */
+ glthread_lock_init (lock);
+ else
+ /* Yield the CPU while waiting for another thread to finish
+ initializing this lock. */
+ while (!lock->guard.done)
+ Sleep (0);
+ }
+ EnterCriticalSection (&lock->lock);
+}
+
+void
+glthread_lock_unlock (gl_lock_t *lock)
+{
+ if (!lock->guard.done)
+ abort ();
+ LeaveCriticalSection (&lock->lock);
+}
+
+void
+glthread_lock_destroy (gl_lock_t *lock)
+{
+ if (!lock->guard.done)
+ abort ();
+ DeleteCriticalSection (&lock->lock);
+ lock->guard.done = 0;
+}
+
+/* ------------------------- gl_rwlock_t datatype ------------------------- */
+
+static inline void
+gl_waitqueue_init (gl_waitqueue_t *wq)
+{
+ wq->array = NULL;
+ wq->count = 0;
+ wq->alloc = 0;
+ wq->offset = 0;
+}
+
+/* Enqueues the current thread, represented by an event, in a wait queue.
+ Returns INVALID_HANDLE_VALUE if an allocation failure occurs. */
+static HANDLE
+gl_waitqueue_add (gl_waitqueue_t *wq)
+{
+ HANDLE event;
+ unsigned int index;
+
+ if (wq->count == wq->alloc)
+ {
+ unsigned int new_alloc = 2 * wq->alloc + 1;
+ HANDLE *new_array =
+ (HANDLE *) realloc (wq->array, new_alloc * sizeof (HANDLE));
+ if (new_array == NULL)
+ /* No more memory. */
+ return INVALID_HANDLE_VALUE;
+ /* Now is a good opportunity to rotate the array so that its contents
+ start at offset 0. */
+ if (wq->offset > 0)
+ {
+ unsigned int old_count = wq->count;
+ unsigned int old_alloc = wq->alloc;
+ unsigned int old_offset = wq->offset;
+ unsigned int i;
+ if (old_offset + old_count > old_alloc)
+ {
+ unsigned int limit = old_offset + old_count - old_alloc;
+ for (i = 0; i < limit; i++)
+ new_array[old_alloc + i] = new_array[i];
+ }
+ for (i = 0; i < old_count; i++)
+ new_array[i] = new_array[old_offset + i];
+ wq->offset = 0;
+ }
+ wq->array = new_array;
+ wq->alloc = new_alloc;
+ }
+ event = CreateEvent (NULL, TRUE, FALSE, NULL);
+ if (event == INVALID_HANDLE_VALUE)
+ /* No way to allocate an event. */
+ return INVALID_HANDLE_VALUE;
+ index = wq->offset + wq->count;
+ if (index >= wq->alloc)
+ index -= wq->alloc;
+ wq->array[index] = event;
+ wq->count++;
+ return event;
+}
+
+/* Notifies the first thread from a wait queue and dequeues it. */
+static inline void
+gl_waitqueue_notify_first (gl_waitqueue_t *wq)
+{
+ SetEvent (wq->array[wq->offset + 0]);
+ wq->offset++;
+ wq->count--;
+ if (wq->count == 0 || wq->offset == wq->alloc)
+ wq->offset = 0;
+}
+
+/* Notifies all threads from a wait queue and dequeues them all. */
+static inline void
+gl_waitqueue_notify_all (gl_waitqueue_t *wq)
+{
+ unsigned int i;
+
+ for (i = 0; i < wq->count; i++)
+ {
+ unsigned int index = wq->offset + i;
+ if (index >= wq->alloc)
+ index -= wq->alloc;
+ SetEvent (wq->array[index]);
+ }
+ wq->count = 0;
+ wq->offset = 0;
+}
+
+void
+glthread_rwlock_init (gl_rwlock_t *lock)
+{
+ InitializeCriticalSection (&lock->lock);
+ gl_waitqueue_init (&lock->waiting_readers);
+ gl_waitqueue_init (&lock->waiting_writers);
+ lock->runcount = 0;
+ lock->guard.done = 1;
+}
+
+void
+glthread_rwlock_rdlock (gl_rwlock_t *lock)
+{
+ if (!lock->guard.done)
+ {
+ if (InterlockedIncrement (&lock->guard.started) == 0)
+ /* This thread is the first one to need this lock. Initialize it. */
+ glthread_rwlock_init (lock);
+ else
+ /* Yield the CPU while waiting for another thread to finish
+ initializing this lock. */
+ while (!lock->guard.done)
+ Sleep (0);
+ }
+ EnterCriticalSection (&lock->lock);
+ /* Test whether only readers are currently running, and whether the runcount
+ field will not overflow. */
+ if (!(lock->runcount + 1 > 0))
+ {
+ /* This thread has to wait for a while. Enqueue it among the
+ waiting_readers. */
+ HANDLE event = gl_waitqueue_add (&lock->waiting_readers);
+ if (event != INVALID_HANDLE_VALUE)
+ {
+ DWORD result;
+ LeaveCriticalSection (&lock->lock);
+ /* Wait until another thread signals this event. */
+ result = WaitForSingleObject (event, INFINITE);
+ if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
+ abort ();
+ CloseHandle (event);
+ /* The thread which signalled the event already did the bookkeeping:
+ removed us from the waiting_readers, incremented lock->runcount. */
+ if (!(lock->runcount > 0))
+ abort ();
+ return;
+ }
+ else
+ {
+ /* Allocation failure. Weird. */
+ do
+ {
+ LeaveCriticalSection (&lock->lock);
+ Sleep (1);
+ EnterCriticalSection (&lock->lock);
+ }
+ while (!(lock->runcount + 1 > 0));
+ }
+ }
+ lock->runcount++;
+ LeaveCriticalSection (&lock->lock);
+}
+
+void
+glthread_rwlock_wrlock (gl_rwlock_t *lock)
+{
+ if (!lock->guard.done)
+ {
+ if (InterlockedIncrement (&lock->guard.started) == 0)
+ /* This thread is the first one to need this lock. Initialize it. */
+ glthread_rwlock_init (lock);
+ else
+ /* Yield the CPU while waiting for another thread to finish
+ initializing this lock. */
+ while (!lock->guard.done)
+ Sleep (0);
+ }
+ EnterCriticalSection (&lock->lock);
+ /* Test whether no readers or writers are currently running. */
+ if (!(lock->runcount == 0))
+ {
+ /* This thread has to wait for a while. Enqueue it among the
+ waiting_writers. */
+ HANDLE event = gl_waitqueue_add (&lock->waiting_writers);
+ if (event != INVALID_HANDLE_VALUE)
+ {
+ DWORD result;
+ LeaveCriticalSection (&lock->lock);
+ /* Wait until another thread signals this event. */
+ result = WaitForSingleObject (event, INFINITE);
+ if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
+ abort ();
+ CloseHandle (event);
+ /* The thread which signalled the event already did the bookkeeping:
+ removed us from the waiting_writers, set lock->runcount = -1. */
+ if (!(lock->runcount == -1))
+ abort ();
+ return;
+ }
+ else
+ {
+ /* Allocation failure. Weird. */
+ do
+ {
+ LeaveCriticalSection (&lock->lock);
+ Sleep (1);
+ EnterCriticalSection (&lock->lock);
+ }
+ while (!(lock->runcount == 0));
+ }
+ }
+ lock->runcount--; /* runcount becomes -1 */
+ LeaveCriticalSection (&lock->lock);
+}
+
+void
+glthread_rwlock_unlock (gl_rwlock_t *lock)
+{
+ if (!lock->guard.done)
+ abort ();
+ EnterCriticalSection (&lock->lock);
+ if (lock->runcount < 0)
+ {
+ /* Drop a writer lock. */
+ if (!(lock->runcount == -1))
+ abort ();
+ lock->runcount = 0;
+ }
+ else
+ {
+ /* Drop a reader lock. */
+ if (!(lock->runcount > 0))
+ abort ();
+ lock->runcount--;
+ }
+ if (lock->runcount == 0)
+ {
+ /* POSIX recommends that "write locks shall take precedence over read
+ locks", to avoid "writer starvation". */
+ if (lock->waiting_writers.count > 0)
+ {
+ /* Wake up one of the waiting writers. */
+ lock->runcount--;
+ gl_waitqueue_notify_first (&lock->waiting_writers);
+ }
+ else
+ {
+ /* Wake up all waiting readers. */
+ lock->runcount += lock->waiting_readers.count;
+ gl_waitqueue_notify_all (&lock->waiting_readers);
+ }
+ }
+ LeaveCriticalSection (&lock->lock);
+}
+
+void
+glthread_rwlock_destroy (gl_rwlock_t *lock)
+{
+ if (!lock->guard.done)
+ abort ();
+ if (lock->runcount != 0)
+ abort ();
+ DeleteCriticalSection (&lock->lock);
+ if (lock->waiting_readers.array != NULL)
+ free (lock->waiting_readers.array);
+ if (lock->waiting_writers.array != NULL)
+ free (lock->waiting_writers.array);
+ lock->guard.done = 0;
+}
+
+/* --------------------- gl_recursive_lock_t datatype --------------------- */
+
+void
+glthread_recursive_lock_init (gl_recursive_lock_t *lock)
+{
+ lock->owner = 0;
+ lock->depth = 0;
+ InitializeCriticalSection (&lock->lock);
+ lock->guard.done = 1;
+}
+
+void
+glthread_recursive_lock_lock (gl_recursive_lock_t *lock)
+{
+ if (!lock->guard.done)
+ {
+ if (InterlockedIncrement (&lock->guard.started) == 0)
+ /* This thread is the first one to need this lock. Initialize it. */
+ glthread_recursive_lock_init (lock);
+ else
+ /* Yield the CPU while waiting for another thread to finish
+ initializing this lock. */
+ while (!lock->guard.done)
+ Sleep (0);
+ }
+ {
+ DWORD self = GetCurrentThreadId ();
+ if (lock->owner != self)
+ {
+ EnterCriticalSection (&lock->lock);
+ lock->owner = self;
+ }
+ if (++(lock->depth) == 0) /* wraparound? */
+ abort ();
+ }
+}
+
+void
+glthread_recursive_lock_unlock (gl_recursive_lock_t *lock)
+{
+ if (lock->owner != GetCurrentThreadId ())
+ abort ();
+ if (lock->depth == 0)
+ abort ();
+ if (--(lock->depth) == 0)
+ {
+ lock->owner = 0;
+ LeaveCriticalSection (&lock->lock);
+ }
+}
+
+void
+glthread_recursive_lock_destroy (gl_recursive_lock_t *lock)
+{
+ if (lock->owner != 0)
+ abort ();
+ DeleteCriticalSection (&lock->lock);
+ lock->guard.done = 0;
+}
+
+#endif
+
+/* ========================================================================= */
--- /dev/null
+/* Locking in multithreaded situations.
+ Copyright (C) 2005 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms of the GNU Library General Public License as published
+ by the Free Software Foundation; either version 2, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ USA. */
+
+/* Written by Bruno Haible <bruno@clisp.org>, 2005.
+ Based on GCC's gthr-posix.h, gthr-posix95.h, gthr-solaris.h,
+ gthr-win32.h. */
+
+/* This file contains locking primitives for use with a given thread library.
+ It does not contain primitives for creating threads or for other
+ synchronization primitives.
+
+ Normal (non-recursive) locks:
+ Type: gl_lock_t
+ Declaration: gl_lock_define(extern, name)
+ Initializer: gl_lock_define_initialized(, name)
+ Initialization: gl_lock_init (name);
+ Taking the lock: gl_lock_lock (name);
+ Releasing the lock: gl_lock_unlock (name);
+ De-initialization: gl_lock_destroy (name);
+
+ Read-Write (non-recursive) locks:
+ Type: gl_rwlock_t
+ Declaration: gl_rwlock_define(extern, name)
+ Initializer: gl_rwlock_define_initialized(, name)
+ Initialization: gl_rwlock_init (name);
+ Taking the lock: gl_rwlock_rdlock (name);
+ gl_rwlock_wrlock (name);
+ Releasing the lock: gl_rwlock_unlock (name);
+ De-initialization: gl_rwlock_destroy (name);
+
+ Recursive locks:
+ Type: gl_recursive_lock_t
+ Declaration: gl_recursive_lock_define(extern, name)
+ Initializer: gl_recursive_lock_define_initialized(, name)
+ Initialization: gl_recursive_lock_init (name);
+ Taking the lock: gl_recursive_lock_lock (name);
+ Releasing the lock: gl_recursive_lock_unlock (name);
+ De-initialization: gl_recursive_lock_destroy (name);
+*/
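+
+/* Usage example (an illustrative sketch only; the identifiers counter,
+   counter_lock and next_id are hypothetical): a statically initialized
+   normal lock protecting a counter, using the macros documented above.
+
+     #include "lock.h"
+
+     gl_lock_define_initialized (static, counter_lock)
+     static unsigned long counter;
+
+     unsigned long
+     next_id (void)
+     {
+       unsigned long id;
+       gl_lock_lock (counter_lock);
+       id = counter++;
+       gl_lock_unlock (counter_lock);
+       return id;
+     }
+*/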
+
+
+/* ========================================================================= */
+
+#if USE_POSIX_THREADS
+
+/* Use the POSIX threads library. */
+
+# include <pthread.h>
+# include <stdlib.h>
+
+# if USE_POSIX_THREADS_WEAK
+
+/* Use weak references to the POSIX threads library. */
+
+/* Weak references avoid dragging in external libraries if the other parts
+ of the program don't use them. Here we use them, because we don't want
+ every program that uses libintl to depend on libpthread. This assumes
+ that libpthread would not be loaded after libintl; i.e. if libintl is
+ loaded first, by an executable that does not depend on libpthread, and
+ then a module is dynamically loaded that depends on libpthread, libintl
+ will not be multithread-safe. */
+
+/* The way to test at runtime whether libpthread is present is to test
+ whether a function pointer's value, such as &pthread_mutex_init, is
+ non-NULL. However, some versions of GCC have a bug through which, in
+ PIC mode, &foo != NULL always evaluates to true if there is a direct
+ call to foo(...) in the same function. To avoid this, we test the
+ address of a function in libpthread that we don't use. */
+
+# pragma weak pthread_mutex_init
+# pragma weak pthread_mutex_lock
+# pragma weak pthread_mutex_unlock
+# pragma weak pthread_mutex_destroy
+# pragma weak pthread_rwlock_init
+# pragma weak pthread_rwlock_rdlock
+# pragma weak pthread_rwlock_wrlock
+# pragma weak pthread_rwlock_unlock
+# pragma weak pthread_rwlock_destroy
+# pragma weak pthread_cond_init
+# pragma weak pthread_cond_wait
+# pragma weak pthread_cond_signal
+# pragma weak pthread_cond_broadcast
+# pragma weak pthread_cond_destroy
+# pragma weak pthread_mutexattr_init
+# pragma weak pthread_mutexattr_settype
+# pragma weak pthread_mutexattr_destroy
+# ifndef pthread_self
+# pragma weak pthread_self
+# endif
+
+# pragma weak pthread_cancel
+# define pthread_in_use() (pthread_cancel != NULL)
+
+# else
+
+# define pthread_in_use() 1
+
+# endif
+
+/* -------------------------- gl_lock_t datatype -------------------------- */
+
+typedef pthread_mutex_t gl_lock_t;
+# define gl_lock_define(STORAGECLASS, NAME) \
+ STORAGECLASS pthread_mutex_t NAME;
+# define gl_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS pthread_mutex_t NAME = PTHREAD_MUTEX_INITIALIZER;
+# define gl_lock_init(NAME) \
+ if (pthread_in_use () && pthread_mutex_init (&NAME, NULL) != 0) abort ()
+# define gl_lock_lock(NAME) \
+ if (pthread_in_use () && pthread_mutex_lock (&NAME) != 0) abort ()
+# define gl_lock_unlock(NAME) \
+ if (pthread_in_use () && pthread_mutex_unlock (&NAME) != 0) abort ()
+# define gl_lock_destroy(NAME) \
+ if (pthread_in_use () && pthread_mutex_destroy (&NAME) != 0) abort ()
+
+/* ------------------------- gl_rwlock_t datatype ------------------------- */
+
+# if HAVE_PTHREAD_RWLOCK
+
+# ifdef PTHREAD_RWLOCK_INITIALIZER
+
+typedef pthread_rwlock_t gl_rwlock_t;
+# define gl_rwlock_define(STORAGECLASS, NAME) \
+ STORAGECLASS pthread_rwlock_t NAME;
+# define gl_rwlock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS pthread_rwlock_t NAME = PTHREAD_RWLOCK_INITIALIZER;
+# define gl_rwlock_init(NAME) \
+ if (pthread_in_use () && pthread_rwlock_init (&NAME, NULL) != 0) abort ()
+# define gl_rwlock_rdlock(NAME) \
+ if (pthread_in_use () && pthread_rwlock_rdlock (&NAME) != 0) abort ()
+# define gl_rwlock_wrlock(NAME) \
+ if (pthread_in_use () && pthread_rwlock_wrlock (&NAME) != 0) abort ()
+# define gl_rwlock_unlock(NAME) \
+ if (pthread_in_use () && pthread_rwlock_unlock (&NAME) != 0) abort ()
+# define gl_rwlock_destroy(NAME) \
+ if (pthread_in_use () && pthread_rwlock_destroy (&NAME) != 0) abort ()
+
+# else
+
+typedef struct
+ {
+ int initialized;
+ pthread_mutex_t guard; /* protects the initialization */
+ pthread_rwlock_t rwlock; /* read-write lock */
+ }
+ gl_rwlock_t;
+# define gl_rwlock_define(STORAGECLASS, NAME) \
+ STORAGECLASS gl_rwlock_t NAME;
+# define gl_rwlock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS gl_rwlock_t NAME = { 0, PTHREAD_MUTEX_INITIALIZER };
+# define gl_rwlock_init(NAME) \
+ if (pthread_in_use ()) glthread_rwlock_init (&NAME)
+# define gl_rwlock_rdlock(NAME) \
+ if (pthread_in_use ()) glthread_rwlock_rdlock (&NAME)
+# define gl_rwlock_wrlock(NAME) \
+ if (pthread_in_use ()) glthread_rwlock_wrlock (&NAME)
+# define gl_rwlock_unlock(NAME) \
+ if (pthread_in_use ()) glthread_rwlock_unlock (&NAME)
+# define gl_rwlock_destroy(NAME) \
+ if (pthread_in_use ()) glthread_rwlock_destroy (&NAME)
+extern void glthread_rwlock_init (gl_rwlock_t *lock);
+extern void glthread_rwlock_rdlock (gl_rwlock_t *lock);
+extern void glthread_rwlock_wrlock (gl_rwlock_t *lock);
+extern void glthread_rwlock_unlock (gl_rwlock_t *lock);
+extern void glthread_rwlock_destroy (gl_rwlock_t *lock);
+
+# endif
+
+# else
+
+typedef struct
+ {
+ pthread_mutex_t lock; /* protects the remaining fields */
+ pthread_cond_t waiting_readers; /* waiting readers */
+ pthread_cond_t waiting_writers; /* waiting writers */
+ unsigned int waiting_writers_count; /* number of waiting writers */
+ int runcount; /* number of readers running, or -1 when a writer runs */
+ }
+ gl_rwlock_t;
+# define gl_rwlock_define(STORAGECLASS, NAME) \
+ STORAGECLASS gl_rwlock_t NAME;
+# define gl_rwlock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS gl_rwlock_t NAME = \
+ { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0 };
+# define gl_rwlock_init(NAME) \
+ if (pthread_in_use ()) glthread_rwlock_init (&NAME)
+# define gl_rwlock_rdlock(NAME) \
+ if (pthread_in_use ()) glthread_rwlock_rdlock (&NAME)
+# define gl_rwlock_wrlock(NAME) \
+ if (pthread_in_use ()) glthread_rwlock_wrlock (&NAME)
+# define gl_rwlock_unlock(NAME) \
+ if (pthread_in_use ()) glthread_rwlock_unlock (&NAME)
+# define gl_rwlock_destroy(NAME) \
+ if (pthread_in_use ()) glthread_rwlock_destroy (&NAME)
+extern void glthread_rwlock_init (gl_rwlock_t *lock);
+extern void glthread_rwlock_rdlock (gl_rwlock_t *lock);
+extern void glthread_rwlock_wrlock (gl_rwlock_t *lock);
+extern void glthread_rwlock_unlock (gl_rwlock_t *lock);
+extern void glthread_rwlock_destroy (gl_rwlock_t *lock);
+
+# endif
+
+/* --------------------- gl_recursive_lock_t datatype --------------------- */
+
+# if HAVE_PTHREAD_MUTEX_RECURSIVE
+
+# if defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER || defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
+
+typedef pthread_mutex_t gl_recursive_lock_t;
+# define gl_recursive_lock_define(STORAGECLASS, NAME) \
+ STORAGECLASS pthread_mutex_t NAME;
+# ifdef PTHREAD_RECURSIVE_MUTEX_INITIALIZER
+# define gl_recursive_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS pthread_mutex_t NAME = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
+# else
+# define gl_recursive_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS pthread_mutex_t NAME = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+# endif
+# define gl_recursive_lock_init(NAME) \
+ if (pthread_in_use () && pthread_mutex_init (&NAME, NULL) != 0) abort ()
+# define gl_recursive_lock_lock(NAME) \
+ if (pthread_in_use () && pthread_mutex_lock (&NAME) != 0) abort ()
+# define gl_recursive_lock_unlock(NAME) \
+ if (pthread_in_use () && pthread_mutex_unlock (&NAME) != 0) abort ()
+# define gl_recursive_lock_destroy(NAME) \
+ if (pthread_in_use () && pthread_mutex_destroy (&NAME) != 0) abort ()
+
+# else
+
+typedef struct
+ {
+ pthread_mutex_t recmutex; /* recursive mutex */
+ pthread_mutex_t guard; /* protects the initialization */
+ int initialized;
+ }
+ gl_recursive_lock_t;
+# define gl_recursive_lock_define(STORAGECLASS, NAME) \
+ STORAGECLASS gl_recursive_lock_t NAME;
+# define gl_recursive_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS gl_recursive_lock_t NAME = \
+ { PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, 0 };
+# define gl_recursive_lock_init(NAME) \
+ if (pthread_in_use ()) glthread_recursive_lock_init (&NAME)
+# define gl_recursive_lock_lock(NAME) \
+ if (pthread_in_use ()) glthread_recursive_lock_lock (&NAME)
+# define gl_recursive_lock_unlock(NAME) \
+ if (pthread_in_use ()) glthread_recursive_lock_unlock (&NAME)
+# define gl_recursive_lock_destroy(NAME) \
+ if (pthread_in_use ()) glthread_recursive_lock_destroy (&NAME)
+extern void glthread_recursive_lock_init (gl_recursive_lock_t *lock);
+extern void glthread_recursive_lock_lock (gl_recursive_lock_t *lock);
+extern void glthread_recursive_lock_unlock (gl_recursive_lock_t *lock);
+extern void glthread_recursive_lock_destroy (gl_recursive_lock_t *lock);
+
+# endif
+
+# else
+
+/* Old versions of POSIX threads on Solaris did not have recursive locks.
+ We have to implement them ourselves. */
+
+typedef struct
+ {
+ pthread_mutex_t mutex;
+ pthread_t owner;
+ unsigned long depth;
+ }
+ gl_recursive_lock_t;
+# define gl_recursive_lock_define(STORAGECLASS, NAME) \
+ STORAGECLASS gl_recursive_lock_t NAME;
+# define gl_recursive_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS gl_recursive_lock_t NAME = \
+ { PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0, 0 };
+# define gl_recursive_lock_init(NAME) \
+ if (pthread_in_use ()) glthread_recursive_lock_init (&NAME)
+# define gl_recursive_lock_lock(NAME) \
+ if (pthread_in_use ()) glthread_recursive_lock_lock (&NAME)
+# define gl_recursive_lock_unlock(NAME) \
+ if (pthread_in_use ()) glthread_recursive_lock_unlock (&NAME)
+# define gl_recursive_lock_destroy(NAME) \
+ if (pthread_in_use ()) glthread_recursive_lock_destroy (&NAME)
+extern void glthread_recursive_lock_init (gl_recursive_lock_t *lock);
+extern void glthread_recursive_lock_lock (gl_recursive_lock_t *lock);
+extern void glthread_recursive_lock_unlock (gl_recursive_lock_t *lock);
+extern void glthread_recursive_lock_destroy (gl_recursive_lock_t *lock);
+
+# endif
+
+#endif
+
+/* ========================================================================= */
+
+#if USE_PTH_THREADS
+
+/* Use the GNU Pth threads library. */
+
+# include <pth.h>
+# include <stdlib.h>
+
+# if USE_PTH_THREADS_WEAK
+
+/* Use weak references to the GNU Pth threads library. */
+
+# pragma weak pth_mutex_init
+# pragma weak pth_mutex_acquire
+# pragma weak pth_mutex_release
+# pragma weak pth_rwlock_init
+# pragma weak pth_rwlock_acquire
+# pragma weak pth_rwlock_release
+
+# pragma weak pth_cancel
+# define pth_in_use() (pth_cancel != NULL)
+
+# else
+
+# define pth_in_use() 1
+
+# endif
+
+/* -------------------------- gl_lock_t datatype -------------------------- */
+
+typedef pth_mutex_t gl_lock_t;
+# define gl_lock_define(STORAGECLASS, NAME) \
+ STORAGECLASS pth_mutex_t NAME;
+# define gl_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS pth_mutex_t NAME = PTH_MUTEX_INIT;
+# define gl_lock_init(NAME) \
+ if (pth_in_use() && !pth_mutex_init (&NAME)) abort ()
+# define gl_lock_lock(NAME) \
+ if (pth_in_use() && !pth_mutex_acquire (&NAME, 0, NULL)) abort ()
+# define gl_lock_unlock(NAME) \
+ if (pth_in_use() && !pth_mutex_release (&NAME)) abort ()
+# define gl_lock_destroy(NAME) \
+ (void)(&NAME)
+
+/* ------------------------- gl_rwlock_t datatype ------------------------- */
+
+typedef pth_rwlock_t gl_rwlock_t;
+# define gl_rwlock_define(STORAGECLASS, NAME) \
+ STORAGECLASS pth_rwlock_t NAME;
+# define gl_rwlock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS pth_rwlock_t NAME = PTH_RWLOCK_INIT;
+# define gl_rwlock_init(NAME) \
+ if (pth_in_use() && !pth_rwlock_init (&NAME)) abort ()
+# define gl_rwlock_rdlock(NAME) \
+ if (pth_in_use() && !pth_rwlock_acquire (&NAME, PTH_RWLOCK_RD, 0, NULL)) abort ()
+# define gl_rwlock_wrlock(NAME) \
+ if (pth_in_use() && !pth_rwlock_acquire (&NAME, PTH_RWLOCK_RW, 0, NULL)) abort ()
+# define gl_rwlock_unlock(NAME) \
+ if (pth_in_use() && !pth_rwlock_release (&NAME)) abort ()
+# define gl_rwlock_destroy(NAME) \
+ (void)(&NAME)
+
+/* --------------------- gl_recursive_lock_t datatype --------------------- */
+
+/* In Pth, mutexes are recursive by default. */
+typedef pth_mutex_t gl_recursive_lock_t;
+# define gl_recursive_lock_define(STORAGECLASS, NAME) \
+ STORAGECLASS pth_mutex_t NAME;
+# define gl_recursive_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS pth_mutex_t NAME = PTH_MUTEX_INIT;
+# define gl_recursive_lock_init(NAME) \
+ if (pth_in_use() && !pth_mutex_init (&NAME)) abort ()
+# define gl_recursive_lock_lock(NAME) \
+ if (pth_in_use() && !pth_mutex_acquire (&NAME, 0, NULL)) abort ()
+# define gl_recursive_lock_unlock(NAME) \
+ if (pth_in_use() && !pth_mutex_release (&NAME)) abort ()
+# define gl_recursive_lock_destroy(NAME) \
+ (void)(&NAME)
+
+#endif
+
+/* ========================================================================= */
+
+#if USE_SOLARIS_THREADS
+
+/* Use the old Solaris threads library. */
+
+# include <thread.h>
+# include <synch.h>
+# include <stdlib.h>
+
+# if USE_SOLARIS_THREADS_WEAK
+
+/* Use weak references to the old Solaris threads library. */
+
+# pragma weak mutex_init
+# pragma weak mutex_lock
+# pragma weak mutex_unlock
+# pragma weak mutex_destroy
+# pragma weak rwlock_init
+# pragma weak rw_rdlock
+# pragma weak rw_wrlock
+# pragma weak rw_unlock
+# pragma weak rwlock_destroy
+# pragma weak thr_self
+
+# pragma weak thr_suspend
+# define thread_in_use() (thr_suspend != NULL)
+
+# else
+
+# define thread_in_use() 1
+
+# endif
+
+/* -------------------------- gl_lock_t datatype -------------------------- */
+
+typedef mutex_t gl_lock_t;
+# define gl_lock_define(STORAGECLASS, NAME) \
+ STORAGECLASS mutex_t NAME;
+# define gl_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS mutex_t NAME = DEFAULTMUTEX;
+# define gl_lock_init(NAME) \
+ if (thread_in_use () && mutex_init (&NAME, USYNC_THREAD, NULL) != 0) abort ()
+# define gl_lock_lock(NAME) \
+ if (thread_in_use () && mutex_lock (&NAME) != 0) abort ()
+# define gl_lock_unlock(NAME) \
+ if (thread_in_use () && mutex_unlock (&NAME) != 0) abort ()
+# define gl_lock_destroy(NAME) \
+ if (thread_in_use () && mutex_destroy (&NAME) != 0) abort ()
+
+/* ------------------------- gl_rwlock_t datatype ------------------------- */
+
+typedef rwlock_t gl_rwlock_t;
+# define gl_rwlock_define(STORAGECLASS, NAME) \
+ STORAGECLASS rwlock_t NAME;
+# define gl_rwlock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS rwlock_t NAME = DEFAULTRWLOCK;
+# define gl_rwlock_init(NAME) \
+ if (thread_in_use () && rwlock_init (&NAME, USYNC_THREAD, NULL) != 0) abort ()
+# define gl_rwlock_rdlock(NAME) \
+ if (thread_in_use () && rw_rdlock (&NAME) != 0) abort ()
+# define gl_rwlock_wrlock(NAME) \
+ if (thread_in_use () && rw_wrlock (&NAME) != 0) abort ()
+# define gl_rwlock_unlock(NAME) \
+ if (thread_in_use () && rw_unlock (&NAME) != 0) abort ()
+# define gl_rwlock_destroy(NAME) \
+ if (thread_in_use () && rwlock_destroy (&NAME) != 0) abort ()
+
+/* --------------------- gl_recursive_lock_t datatype --------------------- */
+
+/* Old Solaris threads did not have recursive locks.
+ We have to implement them ourselves. */
+
+typedef struct
+ {
+ mutex_t mutex;
+ thread_t owner;
+ unsigned long depth;
+ }
+ gl_recursive_lock_t;
+# define gl_recursive_lock_define(STORAGECLASS, NAME) \
+ STORAGECLASS gl_recursive_lock_t NAME;
+# define gl_recursive_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS gl_recursive_lock_t NAME = { DEFAULTMUTEX, (thread_t) 0, 0 };
+# define gl_recursive_lock_init(NAME) \
+ if (thread_in_use ()) glthread_recursive_lock_init (&NAME)
+# define gl_recursive_lock_lock(NAME) \
+ if (thread_in_use ()) glthread_recursive_lock_lock (&NAME)
+# define gl_recursive_lock_unlock(NAME) \
+ if (thread_in_use ()) glthread_recursive_lock_unlock (&NAME)
+# define gl_recursive_lock_destroy(NAME) \
+ if (thread_in_use ()) glthread_recursive_lock_destroy (&NAME)
+extern void glthread_recursive_lock_init (gl_recursive_lock_t *lock);
+extern void glthread_recursive_lock_lock (gl_recursive_lock_t *lock);
+extern void glthread_recursive_lock_unlock (gl_recursive_lock_t *lock);
+extern void glthread_recursive_lock_destroy (gl_recursive_lock_t *lock);
+
+#endif
+
+/* ========================================================================= */
+
+#if USE_WIN32_THREADS
+
+# include <windows.h>
+
+/* We can use CRITICAL_SECTION directly, rather than the Win32 Event, Mutex,
+ Semaphore types, because
+ - we need only to synchronize inside a single process (address space),
+ not inter-process locking,
+ - we don't need to support trylock operations. (TryEnterCriticalSection
+ does not work on Windows 95/98/ME. Packages that need trylock usually
+ define their own mutex type.) */
+
+/* There is no way to statically initialize a CRITICAL_SECTION. It needs
+ to be done lazily, once only. For this we need spinlocks. */
+
+typedef struct { volatile int done; volatile long started; } gl_spinlock_t;
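+
+/* Lazy-initialization protocol (see glthread_lock_lock in lock.c): `started'
+   is statically initialized to -1, so the first thread that needs the lock
+   gets 0 back from InterlockedIncrement and performs the one-time
+   initialization; other threads arriving in the meantime yield the CPU
+   (Sleep (0)) until `done' becomes nonzero.  */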
+
+/* -------------------------- gl_lock_t datatype -------------------------- */
+
+typedef struct
+ {
+ gl_spinlock_t guard; /* protects the initialization */
+ CRITICAL_SECTION lock;
+ }
+ gl_lock_t;
+# define gl_lock_define(STORAGECLASS, NAME) \
+ STORAGECLASS gl_lock_t NAME;
+# define gl_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS gl_lock_t NAME = { { 0, -1 } };
+# define gl_lock_init(NAME) \
+ glthread_lock_init (&NAME)
+# define gl_lock_lock(NAME) \
+ glthread_lock_lock (&NAME)
+# define gl_lock_unlock(NAME) \
+ glthread_lock_unlock (&NAME)
+# define gl_lock_destroy(NAME) \
+ glthread_lock_destroy (&NAME)
+extern void glthread_lock_init (gl_lock_t *lock);
+extern void glthread_lock_lock (gl_lock_t *lock);
+extern void glthread_lock_unlock (gl_lock_t *lock);
+extern void glthread_lock_destroy (gl_lock_t *lock);
+
+/* ------------------------- gl_rwlock_t datatype ------------------------- */
+
+/* It is impossible to implement read-write locks using plain locks, without
+ introducing an extra thread dedicated to managing read-write locks.
+ Therefore here we need to use the low-level Event type. */
+
+typedef struct
+ {
+ HANDLE *array; /* array of waiting threads, each represented by an event */
+ unsigned int count; /* number of waiting threads */
+ unsigned int alloc; /* length of allocated array */
+ unsigned int offset; /* index of first waiting thread in array */
+ }
+ gl_waitqueue_t;
+typedef struct
+ {
+ gl_spinlock_t guard; /* protects the initialization */
+ CRITICAL_SECTION lock; /* protects the remaining fields */
+ gl_waitqueue_t waiting_readers; /* waiting readers */
+ gl_waitqueue_t waiting_writers; /* waiting writers */
+ int runcount; /* number of readers running, or -1 when a writer runs */
+ }
+ gl_rwlock_t;
+# define gl_rwlock_define(STORAGECLASS, NAME) \
+ STORAGECLASS gl_rwlock_t NAME;
+# define gl_rwlock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS gl_rwlock_t NAME = { { 0, -1 } };
+# define gl_rwlock_init(NAME) \
+ glthread_rwlock_init (&NAME)
+# define gl_rwlock_rdlock(NAME) \
+ glthread_rwlock_rdlock (&NAME)
+# define gl_rwlock_wrlock(NAME) \
+ glthread_rwlock_wrlock (&NAME)
+# define gl_rwlock_unlock(NAME) \
+ glthread_rwlock_unlock (&NAME)
+# define gl_rwlock_destroy(NAME) \
+ glthread_rwlock_destroy (&NAME)
+extern void glthread_rwlock_init (gl_rwlock_t *lock);
+extern void glthread_rwlock_rdlock (gl_rwlock_t *lock);
+extern void glthread_rwlock_wrlock (gl_rwlock_t *lock);
+extern void glthread_rwlock_unlock (gl_rwlock_t *lock);
+extern void glthread_rwlock_destroy (gl_rwlock_t *lock);
+
+/* --------------------- gl_recursive_lock_t datatype --------------------- */
+
+/* The Win32 documentation says that CRITICAL_SECTION already implements a
+ recursive lock. But we need not rely on it: It's easy to implement a
+ recursive lock without this assumption. */
+
+typedef struct
+ {
+ gl_spinlock_t guard; /* protects the initialization */
+ DWORD owner;
+ unsigned long depth;
+ CRITICAL_SECTION lock;
+ }
+ gl_recursive_lock_t;
+# define gl_recursive_lock_define(STORAGECLASS, NAME) \
+ STORAGECLASS gl_recursive_lock_t NAME;
+# define gl_recursive_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS gl_recursive_lock_t NAME = { { 0, -1 }, 0, 0 };
+# define gl_recursive_lock_init(NAME) \
+ glthread_recursive_lock_init (&NAME)
+# define gl_recursive_lock_lock(NAME) \
+ glthread_recursive_lock_lock (&NAME)
+# define gl_recursive_lock_unlock(NAME) \
+ glthread_recursive_lock_unlock (&NAME)
+# define gl_recursive_lock_destroy(NAME) \
+ glthread_recursive_lock_destroy (&NAME)
+extern void glthread_recursive_lock_init (gl_recursive_lock_t *lock);
+extern void glthread_recursive_lock_lock (gl_recursive_lock_t *lock);
+extern void glthread_recursive_lock_unlock (gl_recursive_lock_t *lock);
+extern void glthread_recursive_lock_destroy (gl_recursive_lock_t *lock);
+
+#endif
+
+/* ========================================================================= */
+
+#if !(USE_POSIX_THREADS || USE_PTH_THREADS || USE_SOLARIS_THREADS || USE_WIN32_THREADS)
+
+/* Provide dummy implementation if threads are not supported. */
+
+/* -------------------------- gl_lock_t datatype -------------------------- */
+
+typedef int gl_lock_t;
+# define gl_lock_define(STORAGECLASS, NAME)
+# define gl_lock_define_initialized(STORAGECLASS, NAME)
+# define gl_lock_init(NAME)
+# define gl_lock_lock(NAME)
+# define gl_lock_unlock(NAME)
+# define gl_lock_destroy(NAME)
+
+/* ------------------------- gl_rwlock_t datatype ------------------------- */
+
+typedef int gl_rwlock_t;
+# define gl_rwlock_define(STORAGECLASS, NAME)
+# define gl_rwlock_define_initialized(STORAGECLASS, NAME)
+# define gl_rwlock_init(NAME)
+# define gl_rwlock_rdlock(NAME)
+# define gl_rwlock_wrlock(NAME)
+# define gl_rwlock_unlock(NAME)
+# define gl_rwlock_destroy(NAME)
+
+/* --------------------- gl_recursive_lock_t datatype --------------------- */
+
+typedef int gl_recursive_lock_t;
+# define gl_recursive_lock_define(STORAGECLASS, NAME)
+# define gl_recursive_lock_define_initialized(STORAGECLASS, NAME)
+# define gl_recursive_lock_init(NAME)
+# define gl_recursive_lock_lock(NAME)
+# define gl_recursive_lock_unlock(NAME)
+# define gl_recursive_lock_destroy(NAME)
+
+#endif
+2005-07-18 Bruno Haible <bruno@clisp.org>
+
+ * lock.m4: New file, from GNU gettext.
+
2005-06-01 Bruno Haible <bruno@clisp.org>
* poll.m4 (gl_FUNC_POLL): Check against MacOS X 10.4 poll() bug.
--- /dev/null
+# lock.m4 serial 1 (gettext-0.15)
+dnl Copyright (C) 2005 Free Software Foundation, Inc.
+dnl This file is free software; the Free Software Foundation
+dnl gives unlimited permission to copy and/or distribute it,
+dnl with or without modifications, as long as this notice is preserved.
+
+dnl From Bruno Haible.
+
+dnl Tests for a multithreading library to be used.
+dnl Defines at most one of the macros USE_POSIX_THREADS, USE_SOLARIS_THREADS,
+dnl USE_PTH_THREADS, USE_WIN32_THREADS
+dnl Sets the variables LIBTHREAD and LTLIBTHREAD to the linker options for use
+dnl in a Makefile (LIBTHREAD for use without libtool, LTLIBTHREAD for use with
+dnl libtool).
+dnl Sets the variables LIBMULTITHREAD and LTLIBMULTITHREAD similarly, for
+dnl programs that really need multithread functionality. The difference
+dnl between LIBTHREAD and LIBMULTITHREAD is that on platforms supporting weak
+dnl symbols, typically LIBTHREAD="" whereas LIBMULTITHREAD="-lpthread".
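+dnl
+dnl Illustrative usage (the target name `myprog' is hypothetical): after
+dnl invoking gl_LOCK from configure.ac, a program that really needs
+dnl multithread functionality would link against the substituted variable,
+dnl e.g. in Makefile.am:
+dnl   myprog_LDADD = @LIBMULTITHREAD@
+dnl while code that only needs locking when threads happen to be in use
+dnl links with @LIBTHREAD@ (or @LTLIBTHREAD@ when built with libtool).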
+
+AC_DEFUN([gl_LOCK],
+[
+ AC_REQUIRE([AC_CANONICAL_HOST])
+ AC_REQUIRE([AC_GNU_SOURCE]) dnl needed for pthread_rwlock_t on glibc systems
+ dnl Check for multithreading.
+ AC_ARG_ENABLE(threads,
+AC_HELP_STRING([--enable-threads={posix|solaris|pth|win32}], [specify multithreading API])
+AC_HELP_STRING([--disable-threads], [build without multithread safety]),
+ gl_use_threads=$enableval, gl_use_threads=yes)
+ gl_threads_api=none
+ LIBTHREAD=
+ LTLIBTHREAD=
+ LIBMULTITHREAD=
+ LTLIBMULTITHREAD=
+ if test "$gl_use_threads" != no; then
+ dnl Check whether the compiler and linker support weak declarations.
+ AC_MSG_CHECKING([whether imported symbols can be declared weak])
+ gl_have_weak=no
+ AC_TRY_LINK([extern void xyzzy ();
+#pragma weak xyzzy], [xyzzy();], [gl_have_weak=yes])
+ AC_MSG_RESULT([$gl_have_weak])
+ if test "$gl_use_threads" = yes || test "$gl_use_threads" = posix; then
+ # On OSF/1, the compiler needs the flag -pthread or -D_REENTRANT so that
+ # it groks <pthread.h>.
+ gl_save_CPPFLAGS="$CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS -D_REENTRANT"
+ AC_CHECK_HEADER(pthread.h, gl_have_pthread_h=yes, gl_have_pthread_h=no)
+ CPPFLAGS="$gl_save_CPPFLAGS"
+ if test "$gl_have_pthread_h" = yes; then
+ # Other possible tests:
+ # -lpthreads (FSU threads, PCthreads)
+ # -lgthreads
+ case "$host_os" in
+ osf*)
+ # On OSF/1, the compiler needs the flag -pthread so that it groks
+ # <pthread.h>. For the linker, it is equivalent to -lpthread.
+ if test -n "$GCC"; then
+ # gcc-2.95 doesn't understand -pthread, only -D_REENTRANT.
+ CPPFLAGS="$CPPFLAGS -D_REENTRANT"
+ else
+ CPPFLAGS="$CPPFLAGS -pthread"
+ fi
+ ;;
+ esac
+ gl_have_pthread=
+ AC_TRY_LINK([#include <pthread.h>],
+ [pthread_mutex_lock((pthread_mutex_t*)0);],
+ [gl_have_pthread=yes])
+ # Test for libpthread by looking for pthread_kill. (Not pthread_self,
+ # since it is defined as a macro on OSF/1.)
+ if test -n "$gl_have_pthread"; then
+ # The program links fine without libpthread. But it may actually
+ # need to link with libpthread in order to create multiple threads.
+ AC_CHECK_LIB(pthread, pthread_kill,
+ [LIBMULTITHREAD=-lpthread LTLIBMULTITHREAD=-lpthread])
+ else
+ # Some library is needed. Try libpthread and libc_r.
+ AC_CHECK_LIB(pthread, pthread_kill,
+ [gl_have_pthread=yes
+ LIBTHREAD=-lpthread LTLIBTHREAD=-lpthread
+ LIBMULTITHREAD=-lpthread LTLIBMULTITHREAD=-lpthread])
+ if test -z "$gl_have_pthread"; then
+ # For FreeBSD 4.
+ AC_CHECK_LIB(c_r, pthread_kill,
+ [gl_have_pthread=yes
+ LIBTHREAD=-lc_r LTLIBTHREAD=-lc_r
+ LIBMULTITHREAD=-lc_r LTLIBMULTITHREAD=-lc_r])
+ fi
+ fi
+ if test -n "$gl_have_pthread"; then
+ gl_threads_api=posix
+ AC_DEFINE([USE_POSIX_THREADS], 1,
+ [Define if the POSIX multithreading library can be used.])
+ if test -n "$LIBMULTITHREAD" || test -n "$LTLIBMULTITHREAD"; then
+ if test $gl_have_weak = yes; then
+ AC_DEFINE([USE_POSIX_THREADS_WEAK], 1,
+ [Define if references to the POSIX multithreading library should be made weak.])
+ LIBTHREAD=
+ LTLIBTHREAD=
+ fi
+ fi
+ # OSF/1 4.0 and MacOS X 10.1 lack the pthread_rwlock_t type and the
+ # pthread_rwlock_* functions.
+ AC_CHECK_TYPE([pthread_rwlock_t],
+ [AC_DEFINE([HAVE_PTHREAD_RWLOCK], 1,
+ [Define if the POSIX multithreading library has read/write locks.])],
+ [],
+ [#include <pthread.h>])
+ # glibc defines PTHREAD_MUTEX_RECURSIVE as enum, not as a macro.
+ AC_TRY_COMPILE([#include <pthread.h>],
+ [#if __FreeBSD__ == 4
+error "No, in FreeBSD 4.0 recursive mutexes actually don't work."
+#else
+int x = (int)PTHREAD_MUTEX_RECURSIVE;
+#endif],
+ [AC_DEFINE([HAVE_PTHREAD_MUTEX_RECURSIVE], 1,
+ [Define if the <pthread.h> defines PTHREAD_MUTEX_RECURSIVE.])])
+ # Some systems optimize for single-threaded programs by default, and
+ # need special flags to disable these optimizations.
+ case "$host_os" in
+ aix* | freebsd*) CPPFLAGS="$CPPFLAGS -D_THREAD_SAFE" ;;
+ solaris*) CPPFLAGS="$CPPFLAGS -D_REENTRANT" ;;
+ esac
+ fi
+ fi
+ fi
+ if test -z "$gl_have_pthread"; then
+ if test "$gl_use_threads" = yes || test "$gl_use_threads" = solaris; then
+ gl_have_solaristhread=
+ gl_save_LIBS="$LIBS"
+ LIBS="$LIBS -lthread"
+ AC_TRY_LINK([#include <thread.h>
+#include <synch.h>],
+ [thr_self();],
+ [gl_have_solaristhread=yes])
+ LIBS="$gl_save_LIBS"
+ if test -n "$gl_have_solaristhread"; then
+ gl_threads_api=solaris
+ LIBTHREAD=-lthread
+ LTLIBTHREAD=-lthread
+ LIBMULTITHREAD="$LIBTHREAD"
+ LTLIBMULTITHREAD="$LTLIBTHREAD"
+ AC_DEFINE([USE_SOLARIS_THREADS], 1,
+ [Define if the old Solaris multithreading library can be used.])
+ if test $gl_have_weak = yes; then
+ AC_DEFINE([USE_SOLARIS_THREADS_WEAK], 1,
+ [Define if references to the old Solaris multithreading library should be made weak.])
+ LIBTHREAD=
+ LTLIBTHREAD=
+ fi
+ fi
+ fi
+ fi
+ if test "$gl_use_threads" = pth; then
+ gl_save_CPPFLAGS="$CPPFLAGS"
+ AC_LIB_LINKFLAGS(pth)
+ gl_have_pth=
+ gl_save_LIBS="$LIBS"
+ LIBS="$LIBS -lpth"
+ AC_TRY_LINK([#include <pth.h>], [pth_self();], gl_have_pth=yes)
+ LIBS="$gl_save_LIBS"
+ if test -n "$gl_have_pth"; then
+ gl_threads_api=pth
+ LIBTHREAD="$LIBPTH"
+ LTLIBTHREAD="$LTLIBPTH"
+ LIBMULTITHREAD="$LIBTHREAD"
+ LTLIBMULTITHREAD="$LTLIBTHREAD"
+ AC_DEFINE([USE_PTH_THREADS], 1,
+ [Define if the GNU Pth multithreading library can be used.])
+ if test -n "$LIBMULTITHREAD" || test -n "$LTLIBMULTITHREAD"; then
+ if test $gl_have_weak = yes; then
+ AC_DEFINE([USE_PTH_THREADS_WEAK], 1,
+ [Define if references to the GNU Pth multithreading library should be made weak.])
+ LIBTHREAD=
+ LTLIBTHREAD=
+ fi
+ fi
+ else
+ CPPFLAGS="$gl_save_CPPFLAGS"
+ fi
+ fi
+ if test -z "$gl_have_pthread"; then
+ if test "$gl_use_threads" = yes || test "$gl_use_threads" = win32; then
+ if { case "$host_os" in
+ mingw*) true;;
+ *) false;;
+ esac
+ }; then
+ gl_threads_api=win32
+ AC_DEFINE([USE_WIN32_THREADS], 1,
+ [Define if the Win32 multithreading API can be used.])
+ fi
+ fi
+ fi
+ fi
+ AC_MSG_CHECKING([for multithread API to use])
+ AC_MSG_RESULT([$gl_threads_api])
+ AC_SUBST(LIBTHREAD)
+ AC_SUBST(LTLIBTHREAD)
+ AC_SUBST(LIBMULTITHREAD)
+ AC_SUBST(LTLIBMULTITHREAD)
+ gl_PREREQ_LOCK
+])
+
+# Prerequisites of lib/lock.c.
+AC_DEFUN([gl_PREREQ_LOCK], [
+ AC_REQUIRE([AC_C_INLINE])
+])
+
+dnl Survey of platforms:
+dnl
+dnl Platform Available Compiler Supports test-lock
+dnl flavours option weak result
+dnl --------------- --------- --------- -------- ---------
+dnl Linux 2.4/glibc posix -lpthread Y OK
+dnl
+dnl GNU Hurd/glibc posix
+dnl
+dnl FreeBSD 5.3 posix -lc_r Y
+dnl posix -lkse ? Y
+dnl posix -lpthread ? Y
+dnl posix -lthr Y
+dnl
+dnl FreeBSD 5.2 posix -lc_r Y
+dnl posix -lkse Y
+dnl posix -lthr Y
+dnl
+dnl FreeBSD 4.0,4.10 posix -lc_r Y OK
+dnl
+dnl NetBSD 1.6 --
+dnl
+dnl OpenBSD 3.4 posix -lpthread Y OK
+dnl
+dnl MacOS X 10.[123] posix -lpthread Y OK
+dnl
+dnl Solaris 7,8,9 posix -lpthread Y Sol 7,8: 0.0; Sol 9: OK
+dnl solaris -lthread Y Sol 7,8: 0.0; Sol 9: OK
+dnl
+dnl HP-UX 11 posix -lpthread Y OK
+dnl
+dnl IRIX 6.5 posix -lpthread Y 0.5
+dnl
+dnl AIX 4.3,5.1 posix -lpthread N AIX 4: 0.5; AIX 5: OK
+dnl
+dnl OSF/1 4.0,5.1 posix -pthread (cc) Y OK
+dnl -lpthread (gcc) Y
+dnl
+dnl Cygwin posix -lpthread Y OK
+dnl
+dnl Any of the above pth -lpth 0.0
+dnl
+dnl Mingw win32 N OK
+dnl
+dnl BeOS 5 --
+dnl
+dnl The test-lock result shows what happens if in test-lock.c EXPLICIT_YIELD is
+dnl turned off:
+dnl OK if all three tests terminate OK,
+dnl 0.5 if the first test terminates OK but the second one loops endlessly,
+dnl 0.0 if the first test already loops endlessly.
--- /dev/null
+Description:
+Locking in multithreaded situations.
+
+Files:
+lib/lock.h
+lib/lock.c
+m4/lock.m4
+m4/lib-ld.m4
+m4/lib-link.m4
+m4/lib-prefix.m4
+build-aux/config.rpath
+
+Depends-on:
+
+configure.ac:
+gl_LOCK
+
+Makefile.am:
+lib_SOURCES += lock.h lock.c
+
+Include:
+"lock.h"
+
+License:
+LGPL
+
+Maintainer:
+Bruno Haible
+