1 /* Locking in multithreaded situations.
2 Copyright (C) 2005 Free Software Foundation, Inc.
4 This program is free software; you can redistribute it and/or modify it
5 under the terms of the GNU Library General Public License as published
6 by the Free Software Foundation; either version 2, or (at your option)
7 any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 Library General Public License for more details.
14 You should have received a copy of the GNU Library General Public
15 License along with this program; if not, write to the Free Software
16 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
17 USA.  */
19 /* Written by Bruno Haible <bruno@clisp.org>, 2005.
20 Based on GCC's gthr-posix.h, gthr-posix95.h, gthr-solaris.h,
29 /* ========================================================================= */
33 /* Use the POSIX threads library. */
35 /* -------------------------- gl_lock_t datatype -------------------------- */
37 /* ------------------------- gl_rwlock_t datatype ------------------------- */
39 # if HAVE_PTHREAD_RWLOCK
41 # if !defined PTHREAD_RWLOCK_INITIALIZER
/* Initialize the POSIX rwlock embedded in *LOCK and mark it initialized.
   Compiled only when no static PTHREAD_RWLOCK_INITIALIZER is available,
   so initialization must happen at runtime.
   NOTE(review): this extract is missing lines of the original (braces and
   the error path taken when pthread_rwlock_init fails).  */
44 glthread_rwlock_init (gl_rwlock_t *lock)
46 if (pthread_rwlock_init (&lock->rwlock, NULL) != 0)
48 lock->initialized = 1;
/* Acquire *LOCK for reading.  If the rwlock has not been initialized yet,
   initialize it first under lock->guard, re-testing lock->initialized
   after the guard mutex is held (double-checked initialization: the flag
   may have been set by another thread while we waited for the guard).  */
52 glthread_rwlock_rdlock (gl_rwlock_t *lock)
54 if (!lock->initialized)
56 if (pthread_mutex_lock (&lock->guard) != 0)
58 if (!lock->initialized)
59 glthread_rwlock_init (lock);
60 if (pthread_mutex_unlock (&lock->guard) != 0)
63 if (pthread_rwlock_rdlock (&lock->rwlock) != 0)
/* Acquire *LOCK for writing.  Same lazy-initialization protocol as
   glthread_rwlock_rdlock: check-lock-recheck on lock->initialized under
   lock->guard, then take the underlying rwlock in write mode.  */
68 glthread_rwlock_wrlock (gl_rwlock_t *lock)
70 if (!lock->initialized)
72 if (pthread_mutex_lock (&lock->guard) != 0)
74 if (!lock->initialized)
75 glthread_rwlock_init (lock);
76 if (pthread_mutex_unlock (&lock->guard) != 0)
79 if (pthread_rwlock_wrlock (&lock->rwlock) != 0)
/* Release a read or write hold on *LOCK.  Unlocking a lock that was never
   initialized is a caller error (the original's error path for that case
   is elided in this extract).  */
84 glthread_rwlock_unlock (gl_rwlock_t *lock)
86 if (!lock->initialized)
88 if (pthread_rwlock_unlock (&lock->rwlock) != 0)
/* Destroy the rwlock in *LOCK and clear the initialized flag, so the lock
   object could be lazily re-initialized later.  */
93 glthread_rwlock_destroy (gl_rwlock_t *lock)
95 if (!lock->initialized)
97 if (pthread_rwlock_destroy (&lock->rwlock) != 0)
99 lock->initialized = 0;
/* rwlock emulation for platforms without pthread_rwlock_t: a plain mutex
   protecting the state, one condition variable for blocked readers, one
   for blocked writers, and a count of queued writers (used to give
   writers priority over new readers).
   NOTE(review): the line resetting the reader/writer run count is missing
   from this extract.  */
107 glthread_rwlock_init (gl_rwlock_t *lock)
109 if (pthread_mutex_init (&lock->lock, NULL) != 0)
111 if (pthread_cond_init (&lock->waiting_readers, NULL) != 0)
113 if (pthread_cond_init (&lock->waiting_writers, NULL) != 0)
115 lock->waiting_writers_count = 0;
/* Acquire the emulated lock for reading.  lock->runcount is the number of
   active readers (or -1 for an active writer); a reader may enter only
   when no writer is active or queued and runcount + 1 does not overflow.
   NOTE(review): the runcount increment after the wait loop is among the
   lines missing from this extract.  */
120 glthread_rwlock_rdlock (gl_rwlock_t *lock)
122 if (pthread_mutex_lock (&lock->lock) != 0)
124 /* Test whether only readers are currently running, and whether the runcount
125 field will not overflow. */
126 /* POSIX says: "It is implementation-defined whether the calling thread
127 acquires the lock when a writer does not hold the lock and there are
128 writers blocked on the lock." Let's say, no: give the writers a higher
130 while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
132 /* This thread has to wait for a while. Enqueue it among the
134 if (pthread_cond_wait (&lock->waiting_readers, &lock->lock) != 0)
138 if (pthread_mutex_unlock (&lock->lock) != 0)
/* Acquire the emulated lock for writing: wait until the lock is fully
   idle (runcount == 0), registering in waiting_writers_count while
   blocked so that incoming readers defer to us, then set runcount to -1
   to mark the single active writer.  */
143 glthread_rwlock_wrlock (gl_rwlock_t *lock)
145 if (pthread_mutex_lock (&lock->lock) != 0)
147 /* Test whether no readers or writers are currently running. */
148 while (!(lock->runcount == 0))
150 /* This thread has to wait for a while. Enqueue it among the
152 lock->waiting_writers_count++;
153 if (pthread_cond_wait (&lock->waiting_writers, &lock->lock) != 0)
155 lock->waiting_writers_count--;
157 lock->runcount--; /* runcount becomes -1 */
158 if (pthread_mutex_unlock (&lock->lock) != 0)
/* Release the emulated lock.  runcount < 0 means the caller is the active
   writer (runcount must be exactly -1); runcount > 0 means the caller is
   one of the active readers.  When the lock becomes idle, wake one waiting
   writer in preference to readers; only if no writer waits, wake all
   waiting readers at once.
   NOTE(review): the runcount adjustments inside the two branches are among
   the lines missing from this extract.  */
163 glthread_rwlock_unlock (gl_rwlock_t *lock)
165 if (pthread_mutex_lock (&lock->lock) != 0)
167 if (lock->runcount < 0)
169 /* Drop a writer lock. */
170 if (!(lock->runcount == -1))
176 /* Drop a reader lock. */
177 if (!(lock->runcount > 0))
181 if (lock->runcount == 0)
183 /* POSIX recommends that "write locks shall take precedence over read
184 locks", to avoid "writer starvation". */
185 if (lock->waiting_writers_count > 0)
187 /* Wake up one of the waiting writers. */
188 if (pthread_cond_signal (&lock->waiting_writers) != 0)
193 /* Wake up all waiting readers. */
194 if (pthread_cond_broadcast (&lock->waiting_readers) != 0)
198 if (pthread_mutex_unlock (&lock->lock) != 0)
/* Destroy the three synchronization objects making up the emulated
   rwlock: the state mutex and both condition variables.  */
203 glthread_rwlock_destroy (gl_rwlock_t *lock)
205 if (pthread_mutex_destroy (&lock->lock) != 0)
207 if (pthread_cond_destroy (&lock->waiting_readers) != 0)
209 if (pthread_cond_destroy (&lock->waiting_writers) != 0)
215 /* --------------------- gl_recursive_lock_t datatype --------------------- */
217 # if HAVE_PTHREAD_MUTEX_RECURSIVE
219 # if !(defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER || defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP)
/* Initialize *LOCK as a recursive mutex: build a mutex attribute object,
   set its type to PTHREAD_MUTEX_RECURSIVE, create the mutex with it, and
   destroy the (no longer needed) attribute object.  Compiled only when no
   static recursive-mutex initializer is available.  */
222 glthread_recursive_lock_init (gl_recursive_lock_t *lock)
224 pthread_mutexattr_t attributes;
226 if (pthread_mutexattr_init (&attributes) != 0)
228 if (pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE) != 0)
230 if (pthread_mutex_init (&lock->recmutex, &attributes) != 0)
232 if (pthread_mutexattr_destroy (&attributes) != 0)
234 lock->initialized = 1;
/* Lock the recursive mutex, lazily initializing it first under
   lock->guard with the same double-checked pattern used by the rwlock
   wrappers above.  Recursion itself is handled by the native
   PTHREAD_MUTEX_RECURSIVE mutex.  */
238 glthread_recursive_lock_lock (gl_recursive_lock_t *lock)
240 if (!lock->initialized)
242 if (pthread_mutex_lock (&lock->guard) != 0)
244 if (!lock->initialized)
245 glthread_recursive_lock_init (lock);
246 if (pthread_mutex_unlock (&lock->guard) != 0)
249 if (pthread_mutex_lock (&lock->recmutex) != 0)
/* Unlock one level of the recursive mutex.  Unlocking a never-initialized
   lock is a caller error (error path elided in this extract).  */
254 glthread_recursive_lock_unlock (gl_recursive_lock_t *lock)
256 if (!lock->initialized)
258 if (pthread_mutex_unlock (&lock->recmutex) != 0)
/* Destroy the recursive mutex and clear the initialized flag.  */
263 glthread_recursive_lock_destroy (gl_recursive_lock_t *lock)
265 if (!lock->initialized)
267 if (pthread_mutex_destroy (&lock->recmutex) != 0)
269 lock->initialized = 0;
/* Recursive-mutex emulation on top of an ordinary mutex: remember the
   owning thread and a nesting depth.  owner == (pthread_t) 0 means
   "unowned".
   NOTE(review): treating (pthread_t) 0 as a sentinel assumes pthread_t is
   an arithmetic or pointer type — not guaranteed by POSIX, but the
   convention this file relies on throughout.  */
277 glthread_recursive_lock_init (gl_recursive_lock_t *lock)
279 if (pthread_mutex_init (&lock->mutex, NULL) != 0)
281 lock->owner = (pthread_t) 0;
/* Lock the emulated recursive mutex: acquire the underlying mutex only if
   this thread is not already the owner, then increment the nesting depth,
   detecting counter wraparound.
   NOTE(review): the assignment recording this thread as owner (after the
   mutex is acquired) is among the lines missing from this extract.  */
286 glthread_recursive_lock_lock (gl_recursive_lock_t *lock)
288 pthread_t self = pthread_self ();
289 if (lock->owner != self)
291 if (pthread_mutex_lock (&lock->mutex) != 0)
295 if (++(lock->depth) == 0) /* wraparound? */
/* Unlock one nesting level.  Calling from a non-owner thread, or with
   depth already 0, is a caller error.  Only when the outermost level is
   released is the owner cleared and the underlying mutex unlocked.  */
300 glthread_recursive_lock_unlock (gl_recursive_lock_t *lock)
302 if (lock->owner != pthread_self ())
304 if (lock->depth == 0)
306 if (--(lock->depth) == 0)
308 lock->owner = (pthread_t) 0;
309 if (pthread_mutex_unlock (&lock->mutex) != 0)
/* Destroy the emulated recursive mutex; destroying a lock that is still
   owned by some thread is a caller error.  */
315 glthread_recursive_lock_destroy (gl_recursive_lock_t *lock)
317 if (lock->owner != (pthread_t) 0)
319 if (pthread_mutex_destroy (&lock->mutex) != 0)
327 /* ========================================================================= */
331 /* Use the GNU Pth threads library. */
333 /* -------------------------- gl_lock_t datatype -------------------------- */
335 /* ------------------------- gl_rwlock_t datatype ------------------------- */
337 /* --------------------- gl_recursive_lock_t datatype --------------------- */
341 /* ========================================================================= */
343 #if USE_SOLARIS_THREADS
345 /* Use the old Solaris threads library. */
347 /* -------------------------- gl_lock_t datatype -------------------------- */
349 /* ------------------------- gl_rwlock_t datatype ------------------------- */
351 /* --------------------- gl_recursive_lock_t datatype --------------------- */
/* Solaris threads: recursive-mutex emulation identical in structure to
   the POSIX owner/depth version above, but using mutex_init/thr_self and
   thread_t.  USYNC_THREAD restricts the mutex to one process.  */
354 glthread_recursive_lock_init (gl_recursive_lock_t *lock)
356 if (mutex_init (&lock->mutex, USYNC_THREAD, NULL) != 0)
358 lock->owner = (thread_t) 0;
/* Lock: take the underlying Solaris mutex only on first entry by this
   thread, then bump the nesting depth, detecting wraparound.
   NOTE(review): the assignment recording this thread as owner is among
   the lines missing from this extract.  */
363 glthread_recursive_lock_lock (gl_recursive_lock_t *lock)
365 thread_t self = thr_self ();
366 if (lock->owner != self)
368 if (mutex_lock (&lock->mutex) != 0)
372 if (++(lock->depth) == 0) /* wraparound? */
/* Unlock one nesting level; only the owner may unlock, and only the
   outermost release clears the owner and unlocks the Solaris mutex.  */
377 glthread_recursive_lock_unlock (gl_recursive_lock_t *lock)
379 if (lock->owner != thr_self ())
381 if (lock->depth == 0)
383 if (--(lock->depth) == 0)
385 lock->owner = (thread_t) 0;
386 if (mutex_unlock (&lock->mutex) != 0)
/* Destroy the Solaris recursive lock; destroying while owned is a caller
   error.  */
392 glthread_recursive_lock_destroy (gl_recursive_lock_t *lock)
394 if (lock->owner != (thread_t) 0)
396 if (mutex_destroy (&lock->mutex) != 0)
402 /* ========================================================================= */
404 #if USE_WIN32_THREADS
406 /* -------------------------- gl_lock_t datatype -------------------------- */
/* Win32 plain lock: a CRITICAL_SECTION plus a guard.done flag.  The flag
   is what other threads poll during concurrent lazy initialization; it is
   set last, after the critical section is fully initialized.  */
409 glthread_lock_init (gl_lock_t *lock)
411 InitializeCriticalSection (&lock->lock);
412 lock->guard.done = 1;
/* Acquire the Win32 lock, lazily initializing it on first use.  Exactly
   one thread — the one whose InterlockedIncrement on guard.started
   returns 0 — performs the initialization; all others spin until
   guard.done becomes nonzero (the body of that spin loop, presumably a
   CPU-yield call, is elided in this extract).  */
416 glthread_lock_lock (gl_lock_t *lock)
418 if (!lock->guard.done)
420 if (InterlockedIncrement (&lock->guard.started) == 0)
421 /* This thread is the first one to need this lock. Initialize it. */
422 glthread_lock_init (lock);
424 /* Yield the CPU while waiting for another thread to finish
425 initializing this lock. */
426 while (!lock->guard.done)
429 EnterCriticalSection (&lock->lock);
/* Release the Win32 lock.  Unlocking a never-initialized lock is a caller
   error (error path elided in this extract).  */
433 glthread_lock_unlock (gl_lock_t *lock)
435 if (!lock->guard.done)
437 LeaveCriticalSection (&lock->lock);
/* Destroy the Win32 lock and clear guard.done so a later first use would
   re-run the lazy initialization.  */
441 glthread_lock_destroy (gl_lock_t *lock)
443 if (!lock->guard.done)
445 DeleteCriticalSection (&lock->lock);
446 lock->guard.done = 0;
449 /* ------------------------- gl_rwlock_t datatype ------------------------- */
/* Initialize a wait queue to the empty state (the body of this function
   is entirely elided in this extract; presumably it zeroes the array
   pointer and the count/alloc/offset fields — TODO confirm against the
   original file).  */
452 gl_waitqueue_init (gl_waitqueue_t *wq)
460 /* Enqueues the current thread, represented by an event, in a wait queue.
461 Returns INVALID_HANDLE_VALUE if an allocation failure occurs. */
/* Append a new event HANDLE for the calling thread to the circular
   buffer wq->array (window of wq->count entries starting at wq->offset,
   wrapping at wq->alloc).  Returns the event, or INVALID_HANDLE_VALUE on
   allocation failure.  */
463 gl_waitqueue_add (gl_waitqueue_t *wq)
/* Grow geometrically (2n+1) when the circular buffer is full.  */
468 if (wq->count == wq->alloc)
470 unsigned int new_alloc = 2 * wq->alloc + 1;
472 (HANDLE *) realloc (wq->array, new_alloc * sizeof (HANDLE))
473 if (new_array == NULL)
474 /* No more memory. */
475 return INVALID_HANDLE_VALUE;
476 /* Now is a good opportunity to rotate the array so that its contents
477 starts at offset 0. */
480 unsigned int old_count = wq->count;
481 unsigned int old_alloc = wq->alloc;
482 unsigned int old_offset = wq->offset;
/* If the live window wrapped around the end of the old array, first copy
   the wrapped prefix up past the old end (now valid space), then compact
   everything down to offset 0.  */
484 if (old_offset + old_count > old_alloc)
486 unsigned int limit = old_offset + old_count - old_alloc;
487 for (i = 0; i < limit; i++)
488 new_array[old_alloc + i] = new_array[i];
490 for (i = 0; i < old_count; i++)
491 new_array[i] = new_array[old_offset + i];
494 wq->array = new_array;
495 wq->alloc = new_alloc;
/* Manual-reset event (TRUE), created unsignaled (FALSE); it will be
   signaled later by the notifying thread.  */
497 event = CreateEvent (NULL, TRUE, FALSE, NULL);
498 if (event == INVALID_HANDLE_VALUE)
499 /* No way to allocate an event. */
500 return INVALID_HANDLE_VALUE;
/* Store the event at the tail of the circular window, wrapping the index
   if it runs past the end of the array.  */
501 index = wq->offset + wq->count;
502 if (index >= wq->alloc)
504 wq->array[index] = event;
509 /* Notifies the first thread from a wait queue and dequeues it. */
/* Wake the thread at the head of the queue by signaling its event, then
   advance the head (the offset/count bookkeeping between these lines is
   partially elided in this extract; the final test resets/wraps the
   offset when the queue empties or the offset reaches the array end).  */
511 gl_waitqueue_notify_first (gl_waitqueue_t *wq)
513 SetEvent (wq->array[wq->offset + 0]);
516 if (wq->count == 0 || wq->offset == wq->alloc)
520 /* Notifies all threads from a wait queue and dequeues them all. */
/* Wake every queued thread: walk the circular window from the head,
   wrapping the index at the array end, and signal each stored event.
   (The queue-reset lines after the loop are elided in this extract.)  */
522 gl_waitqueue_notify_all (gl_waitqueue_t *wq)
526 for (i = 0; i < wq->count; i++)
528 unsigned int index = wq->offset + i;
529 if (index >= wq->alloc)
531 SetEvent (wq->array[index]);
/* Win32 rwlock: a CRITICAL_SECTION protecting the state, plus one wait
   queue of reader events and one of writer events.  guard.done is set
   last, for the lazy-initialization protocol.
   NOTE(review): the line resetting the runcount field is missing from
   this extract.  */
538 glthread_rwlock_init (gl_rwlock_t *lock)
540 InitializeCriticalSection (&lock->lock);
541 gl_waitqueue_init (&lock->waiting_readers);
542 gl_waitqueue_init (&lock->waiting_writers);
544 lock->guard.done = 1;
/* Acquire the Win32 rwlock for reading.  After the lazy-init dance, if a
   writer is active (or runcount would overflow), enqueue an event in
   waiting_readers and block on it; the waker does all the bookkeeping, so
   on wakeup runcount is already incremented for us.  If the event could
   not be allocated, fall back to polling: repeatedly release and re-take
   the critical section until the lock is free (the sleep between the
   Leave/Enter pair is elided in this extract).
   NOTE(review): the runcount increment on the uncontended path is among
   the lines missing from this extract.  */
548 glthread_rwlock_rdlock (gl_rwlock_t *lock)
550 if (!lock->guard.done)
552 if (InterlockedIncrement (&lock->guard.started) == 0)
553 /* This thread is the first one to need this lock. Initialize it. */
554 glthread_rwlock_init (lock);
556 /* Yield the CPU while waiting for another thread to finish
557 initializing this lock. */
558 while (!lock->guard.done)
561 EnterCriticalSection (&lock->lock);
562 /* Test whether only readers are currently running, and whether the runcount
563 field will not overflow. */
564 if (!(lock->runcount + 1 > 0))
566 /* This thread has to wait for a while. Enqueue it among the
568 HANDLE event = gl_waitqueue_add (&lock->waiting_readers);
569 if (event != INVALID_HANDLE_VALUE)
572 LeaveCriticalSection (&lock->lock);
573 /* Wait until another thread signals this event. */
574 result = WaitForSingleObject (event, INFINITE);
575 if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
578 /* The thread which signalled the event already did the bookkeeping:
579 removed us from the waiting_readers, incremented lock->runcount. */
580 if (!(lock->runcount > 0))
586 /* Allocation failure. Weird. */
589 LeaveCriticalSection (&lock->lock);
591 EnterCriticalSection (&lock->lock);
593 while (!(lock->runcount + 1 > 0));
597 LeaveCriticalSection (&lock->lock);
/* Acquire the Win32 rwlock for writing.  Mirror image of rdlock: if the
   lock is not idle, enqueue an event in waiting_writers and block; the
   waker sets runcount = -1 on our behalf before signaling.  On event
   allocation failure, poll by releasing and re-taking the critical
   section until runcount reaches 0, then claim the lock by decrementing
   runcount to -1.  */
601 glthread_rwlock_wrlock (gl_rwlock_t *lock)
603 if (!lock->guard.done)
605 if (InterlockedIncrement (&lock->guard.started) == 0)
606 /* This thread is the first one to need this lock. Initialize it. */
607 glthread_rwlock_init (lock);
609 /* Yield the CPU while waiting for another thread to finish
610 initializing this lock. */
611 while (!lock->guard.done)
614 EnterCriticalSection (&lock->lock);
615 /* Test whether no readers or writers are currently running. */
616 if (!(lock->runcount == 0))
618 /* This thread has to wait for a while. Enqueue it among the
620 HANDLE event = gl_waitqueue_add (&lock->waiting_writers);
621 if (event != INVALID_HANDLE_VALUE)
624 LeaveCriticalSection (&lock->lock);
625 /* Wait until another thread signals this event. */
626 result = WaitForSingleObject (event, INFINITE);
627 if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
630 /* The thread which signalled the event already did the bookkeeping:
631 removed us from the waiting_writers, set lock->runcount = -1. */
632 if (!(lock->runcount == -1))
638 /* Allocation failure. Weird. */
641 LeaveCriticalSection (&lock->lock);
643 EnterCriticalSection (&lock->lock);
645 while (!(lock->runcount == 0));
648 lock->runcount--; /* runcount becomes -1 */
649 LeaveCriticalSection (&lock->lock);
/* Release the Win32 rwlock.  runcount < 0: the caller is the active
   writer (must be exactly -1); otherwise one active reader.  When the
   lock becomes idle, prefer to hand it to one waiting writer (its
   runcount bookkeeping is among lines elided here); otherwise admit all
   waiting readers at once, crediting runcount with their number before
   signaling them — matching the "bookkeeping done by the waker" contract
   in rdlock/wrlock.  */
653 glthread_rwlock_unlock (gl_rwlock_t *lock)
655 if (!lock->guard.done)
657 EnterCriticalSection (&lock->lock);
658 if (lock->runcount < 0)
660 /* Drop a writer lock. */
661 if (!(lock->runcount == -1))
667 /* Drop a reader lock. */
668 if (!(lock->runcount > 0))
672 if (lock->runcount == 0)
674 /* POSIX recommends that "write locks shall take precedence over read
675 locks", to avoid "writer starvation". */
676 if (lock->waiting_writers.count > 0)
678 /* Wake up one of the waiting writers. */
680 gl_waitqueue_notify_first (&lock->waiting_writers);
684 /* Wake up all waiting readers. */
685 lock->runcount += lock->waiting_readers.count;
686 gl_waitqueue_notify_all (&lock->waiting_readers);
689 LeaveCriticalSection (&lock->lock);
/* Destroy the Win32 rwlock: refuse if still held (runcount != 0), delete
   the critical section, free both wait-queue arrays, and clear guard.done
   for possible lazy re-initialization.
   (Note: the NULL checks before free are redundant — free(NULL) is a
   no-op — but are left untouched here.)  */
693 glthread_rwlock_destroy (gl_rwlock_t *lock)
695 if (!lock->guard.done)
697 if (lock->runcount != 0)
699 DeleteCriticalSection (&lock->lock);
700 if (lock->waiting_readers.array != NULL)
701 free (lock->waiting_readers.array);
702 if (lock->waiting_writers.array != NULL)
703 free (lock->waiting_writers.array);
704 lock->guard.done = 0;
707 /* --------------------- gl_recursive_lock_t datatype --------------------- */
/* Win32 recursive lock: CRITICAL_SECTION plus owner/depth fields and the
   guard.done lazy-init flag.
   NOTE(review): the lines clearing owner and depth are missing from this
   extract.  */
710 glthread_recursive_lock_init (gl_recursive_lock_t *lock)
714 InitializeCriticalSection (&lock->lock);
715 lock->guard.done = 1;
/* Lock the Win32 recursive lock, lazily initializing it via the usual
   InterlockedIncrement/guard.done protocol.  Enter the critical section
   only if this thread is not already the owner, then bump the nesting
   depth, detecting wraparound.
   NOTE(review): the assignment recording this thread as owner (after
   EnterCriticalSection) is among the lines missing from this extract.  */
719 glthread_recursive_lock_lock (gl_recursive_lock_t *lock)
721 if (!lock->guard.done)
723 if (InterlockedIncrement (&lock->guard.started) == 0)
724 /* This thread is the first one to need this lock. Initialize it. */
725 glthread_recursive_lock_init (lock);
727 /* Yield the CPU while waiting for another thread to finish
728 initializing this lock. */
729 while (!lock->guard.done)
733 DWORD self = GetCurrentThreadId ();
734 if (lock->owner != self)
736 EnterCriticalSection (&lock->lock);
739 if (++(lock->depth) == 0) /* wraparound? */
/* Unlock one nesting level; only the owning thread may unlock, and only
   the outermost release leaves the critical section.
   NOTE(review): the line clearing lock->owner before LeaveCriticalSection
   is among the lines missing from this extract.  */
745 glthread_recursive_lock_unlock (gl_recursive_lock_t *lock)
747 if (lock->owner != GetCurrentThreadId ())
749 if (lock->depth == 0)
751 if (--(lock->depth) == 0)
754 LeaveCriticalSection (&lock->lock);
/* Destroy the Win32 recursive lock: refuse while owned, delete the
   critical section, and clear guard.done.  */
759 glthread_recursive_lock_destroy (gl_recursive_lock_t *lock)
761 if (lock->owner != 0)
763 DeleteCriticalSection (&lock->lock);
764 lock->guard.done = 0;
769 /* ========================================================================= */