Diffstat (limited to 'grub-core/lib/gnulib/glthread')
-rw-r--r--  grub-core/lib/gnulib/glthread/lock.c       | 1221
-rw-r--r--  grub-core/lib/gnulib/glthread/lock.h       |  988
-rw-r--r--  grub-core/lib/gnulib/glthread/threadlib.c  |   73
3 files changed, 2282 insertions, 0 deletions
diff --git a/grub-core/lib/gnulib/glthread/lock.c b/grub-core/lib/gnulib/glthread/lock.c
new file mode 100644
index 0000000..a4498cb
--- /dev/null
+++ b/grub-core/lib/gnulib/glthread/lock.c
@@ -0,0 +1,1221 @@
+/* Locking in multithreaded situations.
+ Copyright (C) 2005-2019 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <https://www.gnu.org/licenses/>. */
+
+/* Written by Bruno Haible <bruno@clisp.org>, 2005.
+ Based on GCC's gthr-posix.h, gthr-posix95.h, gthr-solaris.h,
+ gthr-win32.h. */
+
+#include <config.h>
+
+#include "glthread/lock.h"
+
+/* ========================================================================= */
+
+#if USE_POSIX_THREADS
+
+/* -------------------------- gl_lock_t datatype -------------------------- */
+
+/* ------------------------- gl_rwlock_t datatype ------------------------- */
+
+# if HAVE_PTHREAD_RWLOCK && (HAVE_PTHREAD_RWLOCK_RDLOCK_PREFER_WRITER || (defined PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP && (__GNU_LIBRARY__ > 1)))
+
+# ifdef PTHREAD_RWLOCK_INITIALIZER
+
+# if !HAVE_PTHREAD_RWLOCK_RDLOCK_PREFER_WRITER
+ /* glibc with bug https://sourceware.org/bugzilla/show_bug.cgi?id=13701 */
+
+int
+glthread_rwlock_init_for_glibc (pthread_rwlock_t *lock)
+{
+ pthread_rwlockattr_t attributes;
+ int err;
+
+ err = pthread_rwlockattr_init (&attributes);
+ if (err != 0)
+ return err;
+ /* Note: PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP is the only value that
+ causes the writer to be preferred. PTHREAD_RWLOCK_PREFER_WRITER_NP does not
+ do this; see
+ http://man7.org/linux/man-pages/man3/pthread_rwlockattr_setkind_np.3.html */
+ err = pthread_rwlockattr_setkind_np (&attributes,
+ PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP);
+ if (err == 0)
+ err = pthread_rwlock_init(lock, &attributes);
+ /* pthread_rwlockattr_destroy always returns 0. It cannot influence the
+ return value. */
+ pthread_rwlockattr_destroy (&attributes);
+ return err;
+}
+
+# endif
+# else
+
+int
+glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
+{
+ int err;
+
+ err = pthread_rwlock_init (&lock->rwlock, NULL);
+ if (err != 0)
+ return err;
+ lock->initialized = 1;
+ return 0;
+}
+
+int
+glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
+{
+ if (!lock->initialized)
+ {
+ int err;
+
+ err = pthread_mutex_lock (&lock->guard);
+ if (err != 0)
+ return err;
+ if (!lock->initialized)
+ {
+ err = glthread_rwlock_init_multithreaded (lock);
+ if (err != 0)
+ {
+ pthread_mutex_unlock (&lock->guard);
+ return err;
+ }
+ }
+ err = pthread_mutex_unlock (&lock->guard);
+ if (err != 0)
+ return err;
+ }
+ return pthread_rwlock_rdlock (&lock->rwlock);
+}
+
+int
+glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
+{
+ if (!lock->initialized)
+ {
+ int err;
+
+ err = pthread_mutex_lock (&lock->guard);
+ if (err != 0)
+ return err;
+ if (!lock->initialized)
+ {
+ err = glthread_rwlock_init_multithreaded (lock);
+ if (err != 0)
+ {
+ pthread_mutex_unlock (&lock->guard);
+ return err;
+ }
+ }
+ err = pthread_mutex_unlock (&lock->guard);
+ if (err != 0)
+ return err;
+ }
+ return pthread_rwlock_wrlock (&lock->rwlock);
+}
+
+int
+glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
+{
+ if (!lock->initialized)
+ return EINVAL;
+ return pthread_rwlock_unlock (&lock->rwlock);
+}
+
+int
+glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
+{
+ int err;
+
+ if (!lock->initialized)
+ return EINVAL;
+ err = pthread_rwlock_destroy (&lock->rwlock);
+ if (err != 0)
+ return err;
+ lock->initialized = 0;
+ return 0;
+}
+
+# endif
+
+# else
+
+int
+glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
+{
+ int err;
+
+ err = pthread_mutex_init (&lock->lock, NULL);
+ if (err != 0)
+ return err;
+ err = pthread_cond_init (&lock->waiting_readers, NULL);
+ if (err != 0)
+ return err;
+ err = pthread_cond_init (&lock->waiting_writers, NULL);
+ if (err != 0)
+ return err;
+ lock->waiting_writers_count = 0;
+ lock->runcount = 0;
+ return 0;
+}
+
+int
+glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
+{
+ int err;
+
+ err = pthread_mutex_lock (&lock->lock);
+ if (err != 0)
+ return err;
+ /* Test whether only readers are currently running, and whether the runcount
+ field will not overflow, and whether no writer is waiting. The latter
+ condition is because POSIX recommends that "write locks shall take
+ precedence over read locks", to avoid "writer starvation". */
+ while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
+ {
+ /* This thread has to wait for a while. Enqueue it among the
+ waiting_readers. */
+ err = pthread_cond_wait (&lock->waiting_readers, &lock->lock);
+ if (err != 0)
+ {
+ pthread_mutex_unlock (&lock->lock);
+ return err;
+ }
+ }
+ lock->runcount++;
+ return pthread_mutex_unlock (&lock->lock);
+}
+
+int
+glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
+{
+ int err;
+
+ err = pthread_mutex_lock (&lock->lock);
+ if (err != 0)
+ return err;
+ /* Test whether no readers or writers are currently running. */
+ while (!(lock->runcount == 0))
+ {
+ /* This thread has to wait for a while. Enqueue it among the
+ waiting_writers. */
+ lock->waiting_writers_count++;
+ err = pthread_cond_wait (&lock->waiting_writers, &lock->lock);
+ if (err != 0)
+ {
+ lock->waiting_writers_count--;
+ pthread_mutex_unlock (&lock->lock);
+ return err;
+ }
+ lock->waiting_writers_count--;
+ }
+ lock->runcount--; /* runcount becomes -1 */
+ return pthread_mutex_unlock (&lock->lock);
+}
+
+int
+glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
+{
+ int err;
+
+ err = pthread_mutex_lock (&lock->lock);
+ if (err != 0)
+ return err;
+ if (lock->runcount < 0)
+ {
+ /* Drop a writer lock. */
+ if (!(lock->runcount == -1))
+ {
+ pthread_mutex_unlock (&lock->lock);
+ return EINVAL;
+ }
+ lock->runcount = 0;
+ }
+ else
+ {
+ /* Drop a reader lock. */
+ if (!(lock->runcount > 0))
+ {
+ pthread_mutex_unlock (&lock->lock);
+ return EINVAL;
+ }
+ lock->runcount--;
+ }
+ if (lock->runcount == 0)
+ {
+ /* POSIX recommends that "write locks shall take precedence over read
+ locks", to avoid "writer starvation". */
+ if (lock->waiting_writers_count > 0)
+ {
+ /* Wake up one of the waiting writers. */
+ err = pthread_cond_signal (&lock->waiting_writers);
+ if (err != 0)
+ {
+ pthread_mutex_unlock (&lock->lock);
+ return err;
+ }
+ }
+ else
+ {
+ /* Wake up all waiting readers. */
+ err = pthread_cond_broadcast (&lock->waiting_readers);
+ if (err != 0)
+ {
+ pthread_mutex_unlock (&lock->lock);
+ return err;
+ }
+ }
+ }
+ return pthread_mutex_unlock (&lock->lock);
+}
+
+int
+glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
+{
+ int err;
+
+ err = pthread_mutex_destroy (&lock->lock);
+ if (err != 0)
+ return err;
+ err = pthread_cond_destroy (&lock->waiting_readers);
+ if (err != 0)
+ return err;
+ err = pthread_cond_destroy (&lock->waiting_writers);
+ if (err != 0)
+ return err;
+ return 0;
+}
+
+# endif
+
+/* --------------------- gl_recursive_lock_t datatype --------------------- */
+
+# if HAVE_PTHREAD_MUTEX_RECURSIVE
+
+# if defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER || defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
+
+int
+glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
+{
+ pthread_mutexattr_t attributes;
+ int err;
+
+ err = pthread_mutexattr_init (&attributes);
+ if (err != 0)
+ return err;
+ err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
+ if (err != 0)
+ {
+ pthread_mutexattr_destroy (&attributes);
+ return err;
+ }
+ err = pthread_mutex_init (lock, &attributes);
+ if (err != 0)
+ {
+ pthread_mutexattr_destroy (&attributes);
+ return err;
+ }
+ err = pthread_mutexattr_destroy (&attributes);
+ if (err != 0)
+ return err;
+ return 0;
+}
+
+# else
+
+int
+glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
+{
+ pthread_mutexattr_t attributes;
+ int err;
+
+ err = pthread_mutexattr_init (&attributes);
+ if (err != 0)
+ return err;
+ err = pthread_mutexattr_settype (&attributes, PTHREAD_MUTEX_RECURSIVE);
+ if (err != 0)
+ {
+ pthread_mutexattr_destroy (&attributes);
+ return err;
+ }
+ err = pthread_mutex_init (&lock->recmutex, &attributes);
+ if (err != 0)
+ {
+ pthread_mutexattr_destroy (&attributes);
+ return err;
+ }
+ err = pthread_mutexattr_destroy (&attributes);
+ if (err != 0)
+ return err;
+ lock->initialized = 1;
+ return 0;
+}
+
+int
+glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
+{
+ if (!lock->initialized)
+ {
+ int err;
+
+ err = pthread_mutex_lock (&lock->guard);
+ if (err != 0)
+ return err;
+ if (!lock->initialized)
+ {
+ err = glthread_recursive_lock_init_multithreaded (lock);
+ if (err != 0)
+ {
+ pthread_mutex_unlock (&lock->guard);
+ return err;
+ }
+ }
+ err = pthread_mutex_unlock (&lock->guard);
+ if (err != 0)
+ return err;
+ }
+ return pthread_mutex_lock (&lock->recmutex);
+}
+
+int
+glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
+{
+ if (!lock->initialized)
+ return EINVAL;
+ return pthread_mutex_unlock (&lock->recmutex);
+}
+
+int
+glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
+{
+ int err;
+
+ if (!lock->initialized)
+ return EINVAL;
+ err = pthread_mutex_destroy (&lock->recmutex);
+ if (err != 0)
+ return err;
+ lock->initialized = 0;
+ return 0;
+}
+
+# endif
+
+# else
+
+int
+glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
+{
+ int err;
+
+ err = pthread_mutex_init (&lock->mutex, NULL);
+ if (err != 0)
+ return err;
+ lock->owner = (pthread_t) 0;
+ lock->depth = 0;
+ return 0;
+}
+
+int
+glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
+{
+ pthread_t self = pthread_self ();
+ if (lock->owner != self)
+ {
+ int err;
+
+ err = pthread_mutex_lock (&lock->mutex);
+ if (err != 0)
+ return err;
+ lock->owner = self;
+ }
+ if (++(lock->depth) == 0) /* wraparound? */
+ {
+ lock->depth--;
+ return EAGAIN;
+ }
+ return 0;
+}
+
+int
+glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
+{
+ if (lock->owner != pthread_self ())
+ return EPERM;
+ if (lock->depth == 0)
+ return EINVAL;
+ if (--(lock->depth) == 0)
+ {
+ lock->owner = (pthread_t) 0;
+ return pthread_mutex_unlock (&lock->mutex);
+ }
+ else
+ return 0;
+}
+
+int
+glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
+{
+ if (lock->owner != (pthread_t) 0)
+ return EBUSY;
+ return pthread_mutex_destroy (&lock->mutex);
+}
+
+# endif
+
+/* -------------------------- gl_once_t datatype -------------------------- */
+
+static const pthread_once_t fresh_once = PTHREAD_ONCE_INIT;
+
+int
+glthread_once_singlethreaded (pthread_once_t *once_control)
+{
+ /* We don't know whether pthread_once_t is an integer type, a floating-point
+ type, a pointer type, or a structure type. */
+ char *firstbyte = (char *)once_control;
+ if (*firstbyte == *(const char *)&fresh_once)
+ {
+ /* First time use of once_control. Invert the first byte. */
+ *firstbyte = ~ *(const char *)&fresh_once;
+ return 1;
+ }
+ else
+ return 0;
+}
+
+#endif
+
+/* ========================================================================= */
+
+#if USE_PTH_THREADS
+
+/* Use the GNU Pth threads library. */
+
+/* -------------------------- gl_lock_t datatype -------------------------- */
+
+/* ------------------------- gl_rwlock_t datatype ------------------------- */
+
+# if !HAVE_PTH_RWLOCK_ACQUIRE_PREFER_WRITER
+
+int
+glthread_rwlock_init_multithreaded (gl_rwlock_t *lock)
+{
+ if (!pth_mutex_init (&lock->lock))
+ return errno;
+ if (!pth_cond_init (&lock->waiting_readers))
+ return errno;
+ if (!pth_cond_init (&lock->waiting_writers))
+ return errno;
+ lock->waiting_writers_count = 0;
+ lock->runcount = 0;
+ lock->initialized = 1;
+ return 0;
+}
+
+int
+glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock)
+{
+ if (!lock->initialized)
+ glthread_rwlock_init_multithreaded (lock);
+ if (!pth_mutex_acquire (&lock->lock, 0, NULL))
+ return errno;
+ /* Test whether only readers are currently running, and whether the runcount
+ field will not overflow, and whether no writer is waiting. The latter
+ condition is because POSIX recommends that "write locks shall take
+ precedence over read locks", to avoid "writer starvation". */
+ while (!(lock->runcount + 1 > 0 && lock->waiting_writers_count == 0))
+ {
+ /* This thread has to wait for a while. Enqueue it among the
+ waiting_readers. */
+ if (!pth_cond_await (&lock->waiting_readers, &lock->lock, NULL))
+ {
+ int err = errno;
+ pth_mutex_release (&lock->lock);
+ return err;
+ }
+ }
+ lock->runcount++;
+ return (!pth_mutex_release (&lock->lock) ? errno : 0);
+}
+
+int
+glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock)
+{
+ if (!lock->initialized)
+ glthread_rwlock_init_multithreaded (lock);
+ if (!pth_mutex_acquire (&lock->lock, 0, NULL))
+ return errno;
+ /* Test whether no readers or writers are currently running. */
+ while (!(lock->runcount == 0))
+ {
+ /* This thread has to wait for a while. Enqueue it among the
+ waiting_writers. */
+ lock->waiting_writers_count++;
+ if (!pth_cond_await (&lock->waiting_writers, &lock->lock, NULL))
+ {
+ int err = errno;
+ lock->waiting_writers_count--;
+ pth_mutex_release (&lock->lock);
+ return err;
+ }
+ lock->waiting_writers_count--;
+ }
+ lock->runcount--; /* runcount becomes -1 */
+ return (!pth_mutex_release (&lock->lock) ? errno : 0);
+}
+
+int
+glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock)
+{
+ int err;
+
+ if (!lock->initialized)
+ return EINVAL;
+ if (!pth_mutex_acquire (&lock->lock, 0, NULL))
+ return errno;
+ if (lock->runcount < 0)
+ {
+ /* Drop a writer lock. */
+ if (!(lock->runcount == -1))
+ {
+ pth_mutex_release (&lock->lock);
+ return EINVAL;
+ }
+ lock->runcount = 0;
+ }
+ else
+ {
+ /* Drop a reader lock. */
+ if (!(lock->runcount > 0))
+ {
+ pth_mutex_release (&lock->lock);
+ return EINVAL;
+ }
+ lock->runcount--;
+ }
+ if (lock->runcount == 0)
+ {
+ /* POSIX recommends that "write locks shall take precedence over read
+ locks", to avoid "writer starvation". */
+ if (lock->waiting_writers_count > 0)
+ {
+ /* Wake up one of the waiting writers. */
+ if (!pth_cond_notify (&lock->waiting_writers, FALSE))
+ {
+ int err = errno;
+ pth_mutex_release (&lock->lock);
+ return err;
+ }
+ }
+ else
+ {
+ /* Wake up all waiting readers. */
+ if (!pth_cond_notify (&lock->waiting_readers, TRUE))
+ {
+ int err = errno;
+ pth_mutex_release (&lock->lock);
+ return err;
+ }
+ }
+ }
+ return (!pth_mutex_release (&lock->lock) ? errno : 0);
+}
+
+int
+glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock)
+{
+ lock->initialized = 0;
+ return 0;
+}
+
+# endif
+
+/* --------------------- gl_recursive_lock_t datatype --------------------- */
+
+/* -------------------------- gl_once_t datatype -------------------------- */
+
+static void
+glthread_once_call (void *arg)
+{
+ void (**gl_once_temp_addr) (void) = (void (**) (void)) arg;
+ void (*initfunction) (void) = *gl_once_temp_addr;
+ initfunction ();
+}
+
+int
+glthread_once_multithreaded (pth_once_t *once_control, void (*initfunction) (void))
+{
+ void (*temp) (void) = initfunction;
+ return (!pth_once (once_control, glthread_once_call, &temp) ? errno : 0);
+}
+
+int
+glthread_once_singlethreaded (pth_once_t *once_control)
+{
+ /* We know that pth_once_t is an integer type. */
+ if (*once_control == PTH_ONCE_INIT)
+ {
+ /* First time use of once_control. Invert the marker. */
+ *once_control = ~ PTH_ONCE_INIT;
+ return 1;
+ }
+ else
+ return 0;
+}
+
+#endif
+
+/* ========================================================================= */
+
+#if USE_SOLARIS_THREADS
+
+/* Use the old Solaris threads library. */
+
+/* -------------------------- gl_lock_t datatype -------------------------- */
+
+/* ------------------------- gl_rwlock_t datatype ------------------------- */
+
+/* --------------------- gl_recursive_lock_t datatype --------------------- */
+
+int
+glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock)
+{
+ int err;
+
+ err = mutex_init (&lock->mutex, USYNC_THREAD, NULL);
+ if (err != 0)
+ return err;
+ lock->owner = (thread_t) 0;
+ lock->depth = 0;
+ return 0;
+}
+
+int
+glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock)
+{
+ thread_t self = thr_self ();
+ if (lock->owner != self)
+ {
+ int err;
+
+ err = mutex_lock (&lock->mutex);
+ if (err != 0)
+ return err;
+ lock->owner = self;
+ }
+ if (++(lock->depth) == 0) /* wraparound? */
+ {
+ lock->depth--;
+ return EAGAIN;
+ }
+ return 0;
+}
+
+int
+glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock)
+{
+ if (lock->owner != thr_self ())
+ return EPERM;
+ if (lock->depth == 0)
+ return EINVAL;
+ if (--(lock->depth) == 0)
+ {
+ lock->owner = (thread_t) 0;
+ return mutex_unlock (&lock->mutex);
+ }
+ else
+ return 0;
+}
+
+int
+glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock)
+{
+ if (lock->owner != (thread_t) 0)
+ return EBUSY;
+ return mutex_destroy (&lock->mutex);
+}
+
+/* -------------------------- gl_once_t datatype -------------------------- */
+
+int
+glthread_once_multithreaded (gl_once_t *once_control, void (*initfunction) (void))
+{
+ if (!once_control->inited)
+ {
+ int err;
+
+ /* Use the mutex to guarantee that if another thread is already calling
+ the initfunction, this thread waits until it's finished. */
+ err = mutex_lock (&once_control->mutex);
+ if (err != 0)
+ return err;
+ if (!once_control->inited)
+ {
+ once_control->inited = 1;
+ initfunction ();
+ }
+ return mutex_unlock (&once_control->mutex);
+ }
+ else
+ return 0;
+}
+
+int
+glthread_once_singlethreaded (gl_once_t *once_control)
+{
+ /* We know that gl_once_t contains an integer type. */
+ if (!once_control->inited)
+ {
+ /* First time use of once_control. Invert the marker. */
+ once_control->inited = ~ 0;
+ return 1;
+ }
+ else
+ return 0;
+}
+
+#endif
+
+/* ========================================================================= */
+
+#if USE_WINDOWS_THREADS
+
+/* -------------------------- gl_lock_t datatype -------------------------- */
+
+void
+glthread_lock_init_func (gl_lock_t *lock)
+{
+ InitializeCriticalSection (&lock->lock);
+ lock->guard.done = 1;
+}
+
+int
+glthread_lock_lock_func (gl_lock_t *lock)
+{
+ if (!lock->guard.done)
+ {
+ if (InterlockedIncrement (&lock->guard.started) == 0)
+ /* This thread is the first one to need this lock. Initialize it. */
+ glthread_lock_init (lock);
+ else
+ /* Yield the CPU while waiting for another thread to finish
+ initializing this lock. */
+ while (!lock->guard.done)
+ Sleep (0);
+ }
+ EnterCriticalSection (&lock->lock);
+ return 0;
+}
+
+int
+glthread_lock_unlock_func (gl_lock_t *lock)
+{
+ if (!lock->guard.done)
+ return EINVAL;
+ LeaveCriticalSection (&lock->lock);
+ return 0;
+}
+
+int
+glthread_lock_destroy_func (gl_lock_t *lock)
+{
+ if (!lock->guard.done)
+ return EINVAL;
+ DeleteCriticalSection (&lock->lock);
+ lock->guard.done = 0;
+ return 0;
+}
+
+/* ------------------------- gl_rwlock_t datatype ------------------------- */
+
+/* In this file, the waitqueues are implemented as circular arrays. */
+#define gl_waitqueue_t gl_carray_waitqueue_t
+
+static void
+gl_waitqueue_init (gl_waitqueue_t *wq)
+{
+ wq->array = NULL;
+ wq->count = 0;
+ wq->alloc = 0;
+ wq->offset = 0;
+}
+
+/* Enqueues the current thread, represented by an event, in a wait queue.
+ Returns INVALID_HANDLE_VALUE if an allocation failure occurs. */
+static HANDLE
+gl_waitqueue_add (gl_waitqueue_t *wq)
+{
+ HANDLE event;
+ unsigned int index;
+
+ if (wq->count == wq->alloc)
+ {
+ unsigned int new_alloc = 2 * wq->alloc + 1;
+ HANDLE *new_array =
+ (HANDLE *) realloc (wq->array, new_alloc * sizeof (HANDLE));
+ if (new_array == NULL)
+ /* No more memory. */
+ return INVALID_HANDLE_VALUE;
+ /* Now is a good opportunity to rotate the array so that its contents
+ starts at offset 0. */
+ if (wq->offset > 0)
+ {
+ unsigned int old_count = wq->count;
+ unsigned int old_alloc = wq->alloc;
+ unsigned int old_offset = wq->offset;
+ unsigned int i;
+ if (old_offset + old_count > old_alloc)
+ {
+ unsigned int limit = old_offset + old_count - old_alloc;
+ for (i = 0; i < limit; i++)
+ new_array[old_alloc + i] = new_array[i];
+ }
+ for (i = 0; i < old_count; i++)
+ new_array[i] = new_array[old_offset + i];
+ wq->offset = 0;
+ }
+ wq->array = new_array;
+ wq->alloc = new_alloc;
+ }
+ /* Whether the created event is a manual-reset one or an auto-reset one,
+ does not matter, since we will wait on it only once. */
+ event = CreateEvent (NULL, TRUE, FALSE, NULL);
+ if (event == INVALID_HANDLE_VALUE)
+ /* No way to allocate an event. */
+ return INVALID_HANDLE_VALUE;
+ index = wq->offset + wq->count;
+ if (index >= wq->alloc)
+ index -= wq->alloc;
+ wq->array[index] = event;
+ wq->count++;
+ return event;
+}
+
+/* Notifies the first thread from a wait queue and dequeues it. */
+static void
+gl_waitqueue_notify_first (gl_waitqueue_t *wq)
+{
+ SetEvent (wq->array[wq->offset + 0]);
+ wq->offset++;
+ wq->count--;
+ if (wq->count == 0 || wq->offset == wq->alloc)
+ wq->offset = 0;
+}
+
+/* Notifies all threads from a wait queue and dequeues them all. */
+static void
+gl_waitqueue_notify_all (gl_waitqueue_t *wq)
+{
+ unsigned int i;
+
+ for (i = 0; i < wq->count; i++)
+ {
+ unsigned int index = wq->offset + i;
+ if (index >= wq->alloc)
+ index -= wq->alloc;
+ SetEvent (wq->array[index]);
+ }
+ wq->count = 0;
+ wq->offset = 0;
+}
+
+void
+glthread_rwlock_init_func (gl_rwlock_t *lock)
+{
+ InitializeCriticalSection (&lock->lock);
+ gl_waitqueue_init (&lock->waiting_readers);
+ gl_waitqueue_init (&lock->waiting_writers);
+ lock->runcount = 0;
+ lock->guard.done = 1;
+}
+
+int
+glthread_rwlock_rdlock_func (gl_rwlock_t *lock)
+{
+ if (!lock->guard.done)
+ {
+ if (InterlockedIncrement (&lock->guard.started) == 0)
+ /* This thread is the first one to need this lock. Initialize it. */
+ glthread_rwlock_init (lock);
+ else
+ /* Yield the CPU while waiting for another thread to finish
+ initializing this lock. */
+ while (!lock->guard.done)
+ Sleep (0);
+ }
+ EnterCriticalSection (&lock->lock);
+ /* Test whether only readers are currently running, and whether the runcount
+ field will not overflow, and whether no writer is waiting. The latter
+ condition is because POSIX recommends that "write locks shall take
+ precedence over read locks", to avoid "writer starvation". */
+ if (!(lock->runcount + 1 > 0 && lock->waiting_writers.count == 0))
+ {
+ /* This thread has to wait for a while. Enqueue it among the
+ waiting_readers. */
+ HANDLE event = gl_waitqueue_add (&lock->waiting_readers);
+ if (event != INVALID_HANDLE_VALUE)
+ {
+ DWORD result;
+ LeaveCriticalSection (&lock->lock);
+ /* Wait until another thread signals this event. */
+ result = WaitForSingleObject (event, INFINITE);
+ if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
+ abort ();
+ CloseHandle (event);
+ /* The thread which signalled the event already did the bookkeeping:
+ removed us from the waiting_readers, incremented lock->runcount. */
+ if (!(lock->runcount > 0))
+ abort ();
+ return 0;
+ }
+ else
+ {
+ /* Allocation failure. Weird. */
+ do
+ {
+ LeaveCriticalSection (&lock->lock);
+ Sleep (1);
+ EnterCriticalSection (&lock->lock);
+ }
+ while (!(lock->runcount + 1 > 0));
+ }
+ }
+ lock->runcount++;
+ LeaveCriticalSection (&lock->lock);
+ return 0;
+}
+
+int
+glthread_rwlock_wrlock_func (gl_rwlock_t *lock)
+{
+ if (!lock->guard.done)
+ {
+ if (InterlockedIncrement (&lock->guard.started) == 0)
+ /* This thread is the first one to need this lock. Initialize it. */
+ glthread_rwlock_init (lock);
+ else
+ /* Yield the CPU while waiting for another thread to finish
+ initializing this lock. */
+ while (!lock->guard.done)
+ Sleep (0);
+ }
+ EnterCriticalSection (&lock->lock);
+ /* Test whether no readers or writers are currently running. */
+ if (!(lock->runcount == 0))
+ {
+ /* This thread has to wait for a while. Enqueue it among the
+ waiting_writers. */
+ HANDLE event = gl_waitqueue_add (&lock->waiting_writers);
+ if (event != INVALID_HANDLE_VALUE)
+ {
+ DWORD result;
+ LeaveCriticalSection (&lock->lock);
+ /* Wait until another thread signals this event. */
+ result = WaitForSingleObject (event, INFINITE);
+ if (result == WAIT_FAILED || result == WAIT_TIMEOUT)
+ abort ();
+ CloseHandle (event);
+ /* The thread which signalled the event already did the bookkeeping:
+ removed us from the waiting_writers, set lock->runcount = -1. */
+ if (!(lock->runcount == -1))
+ abort ();
+ return 0;
+ }
+ else
+ {
+ /* Allocation failure. Weird. */
+ do
+ {
+ LeaveCriticalSection (&lock->lock);
+ Sleep (1);
+ EnterCriticalSection (&lock->lock);
+ }
+ while (!(lock->runcount == 0));
+ }
+ }
+ lock->runcount--; /* runcount becomes -1 */
+ LeaveCriticalSection (&lock->lock);
+ return 0;
+}
+
+int
+glthread_rwlock_unlock_func (gl_rwlock_t *lock)
+{
+ if (!lock->guard.done)
+ return EINVAL;
+ EnterCriticalSection (&lock->lock);
+ if (lock->runcount < 0)
+ {
+ /* Drop a writer lock. */
+ if (!(lock->runcount == -1))
+ abort ();
+ lock->runcount = 0;
+ }
+ else
+ {
+ /* Drop a reader lock. */
+ if (!(lock->runcount > 0))
+ {
+ LeaveCriticalSection (&lock->lock);
+ return EPERM;
+ }
+ lock->runcount--;
+ }
+ if (lock->runcount == 0)
+ {
+ /* POSIX recommends that "write locks shall take precedence over read
+ locks", to avoid "writer starvation". */
+ if (lock->waiting_writers.count > 0)
+ {
+ /* Wake up one of the waiting writers. */
+ lock->runcount--;
+ gl_waitqueue_notify_first (&lock->waiting_writers);
+ }
+ else
+ {
+ /* Wake up all waiting readers. */
+ lock->runcount += lock->waiting_readers.count;
+ gl_waitqueue_notify_all (&lock->waiting_readers);
+ }
+ }
+ LeaveCriticalSection (&lock->lock);
+ return 0;
+}
+
+int
+glthread_rwlock_destroy_func (gl_rwlock_t *lock)
+{
+ if (!lock->guard.done)
+ return EINVAL;
+ if (lock->runcount != 0)
+ return EBUSY;
+ DeleteCriticalSection (&lock->lock);
+ if (lock->waiting_readers.array != NULL)
+ free (lock->waiting_readers.array);
+ if (lock->waiting_writers.array != NULL)
+ free (lock->waiting_writers.array);
+ lock->guard.done = 0;
+ return 0;
+}
+
+/* --------------------- gl_recursive_lock_t datatype --------------------- */
+
+void
+glthread_recursive_lock_init_func (gl_recursive_lock_t *lock)
+{
+ lock->owner = 0;
+ lock->depth = 0;
+ InitializeCriticalSection (&lock->lock);
+ lock->guard.done = 1;
+}
+
+int
+glthread_recursive_lock_lock_func (gl_recursive_lock_t *lock)
+{
+ if (!lock->guard.done)
+ {
+ if (InterlockedIncrement (&lock->guard.started) == 0)
+ /* This thread is the first one to need this lock. Initialize it. */
+ glthread_recursive_lock_init (lock);
+ else
+ /* Yield the CPU while waiting for another thread to finish
+ initializing this lock. */
+ while (!lock->guard.done)
+ Sleep (0);
+ }
+ {
+ DWORD self = GetCurrentThreadId ();
+ if (lock->owner != self)
+ {
+ EnterCriticalSection (&lock->lock);
+ lock->owner = self;
+ }
+ if (++(lock->depth) == 0) /* wraparound? */
+ {
+ lock->depth--;
+ return EAGAIN;
+ }
+ }
+ return 0;
+}
+
+int
+glthread_recursive_lock_unlock_func (gl_recursive_lock_t *lock)
+{
+ if (lock->owner != GetCurrentThreadId ())
+ return EPERM;
+ if (lock->depth == 0)
+ return EINVAL;
+ if (--(lock->depth) == 0)
+ {
+ lock->owner = 0;
+ LeaveCriticalSection (&lock->lock);
+ }
+ return 0;
+}
+
+int
+glthread_recursive_lock_destroy_func (gl_recursive_lock_t *lock)
+{
+ if (lock->owner != 0)
+ return EBUSY;
+ DeleteCriticalSection (&lock->lock);
+ lock->guard.done = 0;
+ return 0;
+}
+
+/* -------------------------- gl_once_t datatype -------------------------- */
+
+void
+glthread_once_func (gl_once_t *once_control, void (*initfunction) (void))
+{
+ if (once_control->inited <= 0)
+ {
+ if (InterlockedIncrement (&once_control->started) == 0)
+ {
+ /* This thread is the first one to come to this once_control. */
+ InitializeCriticalSection (&once_control->lock);
+ EnterCriticalSection (&once_control->lock);
+ once_control->inited = 0;
+ initfunction ();
+ once_control->inited = 1;
+ LeaveCriticalSection (&once_control->lock);
+ }
+ else
+ {
+ /* Undo last operation. */
+ InterlockedDecrement (&once_control->started);
+ /* Some other thread has already started the initialization.
+ Yield the CPU while waiting for the other thread to finish
+ initializing and taking the lock. */
+ while (once_control->inited < 0)
+ Sleep (0);
+ if (once_control->inited <= 0)
+ {
+ /* Take the lock. This blocks until the other thread has
+ finished calling the initfunction. */
+ EnterCriticalSection (&once_control->lock);
+ LeaveCriticalSection (&once_control->lock);
+ if (!(once_control->inited > 0))
+ abort ();
+ }
+ }
+ }
+}
+
+#endif
+
+/* ========================================================================= */
diff --git a/grub-core/lib/gnulib/glthread/lock.h b/grub-core/lib/gnulib/glthread/lock.h
new file mode 100644
index 0000000..636b089
--- /dev/null
+++ b/grub-core/lib/gnulib/glthread/lock.h
@@ -0,0 +1,988 @@
+/* Locking in multithreaded situations.
+ Copyright (C) 2005-2019 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <https://www.gnu.org/licenses/>. */
+
+/* Written by Bruno Haible <bruno@clisp.org>, 2005.
+ Based on GCC's gthr-posix.h, gthr-posix95.h, gthr-solaris.h,
+ gthr-win32.h. */
+
+/* This file contains locking primitives for use with a given thread library.
+ It does not contain primitives for creating threads or for other
+ synchronization primitives.
+
+ Normal (non-recursive) locks:
+ Type: gl_lock_t
+ Declaration: gl_lock_define(extern, name)
+ Initializer: gl_lock_define_initialized(, name)
+ Initialization: gl_lock_init (name);
+ Taking the lock: gl_lock_lock (name);
+ Releasing the lock: gl_lock_unlock (name);
+ De-initialization: gl_lock_destroy (name);
+ Equivalent functions with control of error handling:
+ Initialization: err = glthread_lock_init (&name);
+ Taking the lock: err = glthread_lock_lock (&name);
+ Releasing the lock: err = glthread_lock_unlock (&name);
+ De-initialization: err = glthread_lock_destroy (&name);
+
+ Read-Write (non-recursive) locks:
+ Type: gl_rwlock_t
+ Declaration: gl_rwlock_define(extern, name)
+ Initializer: gl_rwlock_define_initialized(, name)
+ Initialization: gl_rwlock_init (name);
+ Taking the lock: gl_rwlock_rdlock (name);
+ gl_rwlock_wrlock (name);
+ Releasing the lock: gl_rwlock_unlock (name);
+ De-initialization: gl_rwlock_destroy (name);
+ Equivalent functions with control of error handling:
+ Initialization: err = glthread_rwlock_init (&name);
+ Taking the lock: err = glthread_rwlock_rdlock (&name);
+ err = glthread_rwlock_wrlock (&name);
+ Releasing the lock: err = glthread_rwlock_unlock (&name);
+ De-initialization: err = glthread_rwlock_destroy (&name);
+
+ Recursive locks:
+ Type: gl_recursive_lock_t
+ Declaration: gl_recursive_lock_define(extern, name)
+ Initializer: gl_recursive_lock_define_initialized(, name)
+ Initialization: gl_recursive_lock_init (name);
+ Taking the lock: gl_recursive_lock_lock (name);
+ Releasing the lock: gl_recursive_lock_unlock (name);
+ De-initialization: gl_recursive_lock_destroy (name);
+ Equivalent functions with control of error handling:
+ Initialization: err = glthread_recursive_lock_init (&name);
+ Taking the lock: err = glthread_recursive_lock_lock (&name);
+ Releasing the lock: err = glthread_recursive_lock_unlock (&name);
+ De-initialization: err = glthread_recursive_lock_destroy (&name);
+
+ Once-only execution:
+ Type: gl_once_t
+ Initializer: gl_once_define(extern, name)
+ Execution: gl_once (name, initfunction);
+ Equivalent functions with control of error handling:
+ Execution: err = glthread_once (&name, initfunction);
+*/
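
As a minimal usage sketch of the macros documented above (not part of the patch; the demo_* names are hypothetical), a module would typically combine a once-control with a normal lock like this:

#include "glthread/lock.h"

gl_lock_define_initialized (static, demo_lock)
gl_once_define (static, demo_once)

static int demo_counter;

static void
demo_init (void)
{
  demo_counter = 0;
}

static void
demo_increment (void)
{
  gl_once (demo_once, demo_init);   /* runs demo_init exactly once */
  gl_lock_lock (demo_lock);         /* mutual exclusion around the update */
  demo_counter++;
  gl_lock_unlock (demo_lock);
}
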
+
+
+#ifndef _LOCK_H
+#define _LOCK_H
+
+#include <errno.h>
+#include <stdlib.h>
+
+/* ========================================================================= */
+
+#if USE_POSIX_THREADS
+
+/* Use the POSIX threads library. */
+
+# include <pthread.h>
+
+# ifdef __cplusplus
+extern "C" {
+# endif
+
+# if PTHREAD_IN_USE_DETECTION_HARD
+
+/* The pthread_in_use() detection needs to be done at runtime. */
+# define pthread_in_use() \
+ glthread_in_use ()
+extern int glthread_in_use (void);
+
+# endif
+
+# if USE_POSIX_THREADS_WEAK
+
+/* Use weak references to the POSIX threads library. */
+
+/* Weak references avoid dragging in external libraries if the other parts
+ of the program don't use them. Here we use them, because we don't want
+ every program that uses libintl to depend on libpthread. This assumes
+ that libpthread would not be loaded after libintl; i.e. if libintl is
+ loaded first, by an executable that does not depend on libpthread, and
+ then a module is dynamically loaded that depends on libpthread, libintl
+ will not be multithread-safe. */
+
+/* The way to test at runtime whether libpthread is present is to test
+ whether a function pointer's value, such as &pthread_mutex_init, is
+ non-NULL. However, some versions of GCC have a bug through which, in
+ PIC mode, &foo != NULL always evaluates to true if there is a direct
+ call to foo(...) in the same function. To avoid this, we test the
+ address of a function in libpthread that we don't use. */
+
+# pragma weak pthread_mutex_init
+# pragma weak pthread_mutex_lock
+# pragma weak pthread_mutex_unlock
+# pragma weak pthread_mutex_destroy
+# pragma weak pthread_rwlock_init
+# pragma weak pthread_rwlock_rdlock
+# pragma weak pthread_rwlock_wrlock
+# pragma weak pthread_rwlock_unlock
+# pragma weak pthread_rwlock_destroy
+# pragma weak pthread_once
+# pragma weak pthread_cond_init
+# pragma weak pthread_cond_wait
+# pragma weak pthread_cond_signal
+# pragma weak pthread_cond_broadcast
+# pragma weak pthread_cond_destroy
+# pragma weak pthread_mutexattr_init
+# pragma weak pthread_mutexattr_settype
+# pragma weak pthread_mutexattr_destroy
+# pragma weak pthread_rwlockattr_init
+# if __GNU_LIBRARY__ > 1
+# pragma weak pthread_rwlockattr_setkind_np
+# endif
+# pragma weak pthread_rwlockattr_destroy
+# ifndef pthread_self
+# pragma weak pthread_self
+# endif
+
+# if !PTHREAD_IN_USE_DETECTION_HARD
+ /* Considering all platforms with USE_POSIX_THREADS_WEAK, only few symbols
+ can be used to determine whether libpthread is in use. These are:
+ pthread_mutexattr_gettype
+ pthread_rwlockattr_destroy
+ pthread_rwlockattr_init
+ */
+# pragma weak pthread_mutexattr_gettype
+# define pthread_in_use() (pthread_mutexattr_gettype != NULL)
+# endif
+
+# else
+
+# if !PTHREAD_IN_USE_DETECTION_HARD
+# define pthread_in_use() 1
+# endif
+
+# endif
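
A minimal standalone sketch of the runtime presence test described above (an assumption for illustration, not part of the patch; it requires a toolchain that honors #pragma weak, such as GCC with glibc):

#include <stdio.h>
#include <pthread.h>

#pragma weak pthread_mutexattr_gettype

int
main (void)
{
  /* The weak symbol resolves to NULL when libpthread was not linked in,
     in which case the locking macros above collapse to no-ops. */
  if (pthread_mutexattr_gettype != NULL)
    puts ("libpthread present: real locking is used");
  else
    puts ("libpthread absent: locks degrade to no-ops");
  return 0;
}
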
+
+/* -------------------------- gl_lock_t datatype -------------------------- */
+
+typedef pthread_mutex_t gl_lock_t;
+# define gl_lock_define(STORAGECLASS, NAME) \
+ STORAGECLASS pthread_mutex_t NAME;
+# define gl_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS pthread_mutex_t NAME = gl_lock_initializer;
+# define gl_lock_initializer \
+ PTHREAD_MUTEX_INITIALIZER
+# define glthread_lock_init(LOCK) \
+ (pthread_in_use () ? pthread_mutex_init (LOCK, NULL) : 0)
+# define glthread_lock_lock(LOCK) \
+ (pthread_in_use () ? pthread_mutex_lock (LOCK) : 0)
+# define glthread_lock_unlock(LOCK) \
+ (pthread_in_use () ? pthread_mutex_unlock (LOCK) : 0)
+# define glthread_lock_destroy(LOCK) \
+ (pthread_in_use () ? pthread_mutex_destroy (LOCK) : 0)
+
+/* ------------------------- gl_rwlock_t datatype ------------------------- */
+
+# if HAVE_PTHREAD_RWLOCK && (HAVE_PTHREAD_RWLOCK_RDLOCK_PREFER_WRITER || (defined PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP && (__GNU_LIBRARY__ > 1)))
+
+# ifdef PTHREAD_RWLOCK_INITIALIZER
+
+typedef pthread_rwlock_t gl_rwlock_t;
+# define gl_rwlock_define(STORAGECLASS, NAME) \
+ STORAGECLASS pthread_rwlock_t NAME;
+# define gl_rwlock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS pthread_rwlock_t NAME = gl_rwlock_initializer;
+# if HAVE_PTHREAD_RWLOCK_RDLOCK_PREFER_WRITER
+# define gl_rwlock_initializer \
+ PTHREAD_RWLOCK_INITIALIZER
+# define glthread_rwlock_init(LOCK) \
+ (pthread_in_use () ? pthread_rwlock_init (LOCK, NULL) : 0)
+# else /* glibc with bug https://sourceware.org/bugzilla/show_bug.cgi?id=13701 */
+# define gl_rwlock_initializer \
+ PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP
+# define glthread_rwlock_init(LOCK) \
+ (pthread_in_use () ? glthread_rwlock_init_for_glibc (LOCK) : 0)
+extern int glthread_rwlock_init_for_glibc (pthread_rwlock_t *lock);
+# endif
+# define glthread_rwlock_rdlock(LOCK) \
+ (pthread_in_use () ? pthread_rwlock_rdlock (LOCK) : 0)
+# define glthread_rwlock_wrlock(LOCK) \
+ (pthread_in_use () ? pthread_rwlock_wrlock (LOCK) : 0)
+# define glthread_rwlock_unlock(LOCK) \
+ (pthread_in_use () ? pthread_rwlock_unlock (LOCK) : 0)
+# define glthread_rwlock_destroy(LOCK) \
+ (pthread_in_use () ? pthread_rwlock_destroy (LOCK) : 0)
+
+# else
+
+typedef struct
+ {
+ int initialized;
+ pthread_mutex_t guard; /* protects the initialization */
+ pthread_rwlock_t rwlock; /* read-write lock */
+ }
+ gl_rwlock_t;
+# define gl_rwlock_define(STORAGECLASS, NAME) \
+ STORAGECLASS gl_rwlock_t NAME;
+# define gl_rwlock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS gl_rwlock_t NAME = gl_rwlock_initializer;
+# define gl_rwlock_initializer \
+ { 0, PTHREAD_MUTEX_INITIALIZER }
+# define glthread_rwlock_init(LOCK) \
+ (pthread_in_use () ? glthread_rwlock_init_multithreaded (LOCK) : 0)
+# define glthread_rwlock_rdlock(LOCK) \
+ (pthread_in_use () ? glthread_rwlock_rdlock_multithreaded (LOCK) : 0)
+# define glthread_rwlock_wrlock(LOCK) \
+ (pthread_in_use () ? glthread_rwlock_wrlock_multithreaded (LOCK) : 0)
+# define glthread_rwlock_unlock(LOCK) \
+ (pthread_in_use () ? glthread_rwlock_unlock_multithreaded (LOCK) : 0)
+# define glthread_rwlock_destroy(LOCK) \
+ (pthread_in_use () ? glthread_rwlock_destroy_multithreaded (LOCK) : 0)
+extern int glthread_rwlock_init_multithreaded (gl_rwlock_t *lock);
+extern int glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock);
+extern int glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock);
+extern int glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock);
+extern int glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock);
+
+# endif
+
+# else
+
+typedef struct
+ {
+ pthread_mutex_t lock; /* protects the remaining fields */
+ pthread_cond_t waiting_readers; /* waiting readers */
+ pthread_cond_t waiting_writers; /* waiting writers */
+ unsigned int waiting_writers_count; /* number of waiting writers */
+ int runcount; /* number of readers running, or -1 when a writer runs */
+ }
+ gl_rwlock_t;
+# define gl_rwlock_define(STORAGECLASS, NAME) \
+ STORAGECLASS gl_rwlock_t NAME;
+# define gl_rwlock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS gl_rwlock_t NAME = gl_rwlock_initializer;
+# define gl_rwlock_initializer \
+ { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0 }
+# define glthread_rwlock_init(LOCK) \
+ (pthread_in_use () ? glthread_rwlock_init_multithreaded (LOCK) : 0)
+# define glthread_rwlock_rdlock(LOCK) \
+ (pthread_in_use () ? glthread_rwlock_rdlock_multithreaded (LOCK) : 0)
+# define glthread_rwlock_wrlock(LOCK) \
+ (pthread_in_use () ? glthread_rwlock_wrlock_multithreaded (LOCK) : 0)
+# define glthread_rwlock_unlock(LOCK) \
+ (pthread_in_use () ? glthread_rwlock_unlock_multithreaded (LOCK) : 0)
+# define glthread_rwlock_destroy(LOCK) \
+ (pthread_in_use () ? glthread_rwlock_destroy_multithreaded (LOCK) : 0)
+extern int glthread_rwlock_init_multithreaded (gl_rwlock_t *lock);
+extern int glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock);
+extern int glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock);
+extern int glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock);
+extern int glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock);
+
+# endif
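
A minimal usage sketch of the read-write lock API declared above (not part of the patch; the demo_* names are hypothetical), with a shared table read by many threads and occasionally updated:

#include "glthread/lock.h"

gl_rwlock_define_initialized (static, demo_table_lock)

static int demo_table[16];

static int
demo_lookup (unsigned int i)
{
  int value;
  gl_rwlock_rdlock (demo_table_lock);   /* many readers may run concurrently */
  value = demo_table[i & 15];
  gl_rwlock_unlock (demo_table_lock);
  return value;
}

static void
demo_store (unsigned int i, int value)
{
  gl_rwlock_wrlock (demo_table_lock);   /* exclusive; waiting writers are preferred */
  demo_table[i & 15] = value;
  gl_rwlock_unlock (demo_table_lock);
}
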
+
+/* --------------------- gl_recursive_lock_t datatype --------------------- */
+
+# if HAVE_PTHREAD_MUTEX_RECURSIVE
+
+# if defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER || defined PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
+
+typedef pthread_mutex_t gl_recursive_lock_t;
+# define gl_recursive_lock_define(STORAGECLASS, NAME) \
+ STORAGECLASS pthread_mutex_t NAME;
+# define gl_recursive_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS pthread_mutex_t NAME = gl_recursive_lock_initializer;
+# ifdef PTHREAD_RECURSIVE_MUTEX_INITIALIZER
+# define gl_recursive_lock_initializer \
+ PTHREAD_RECURSIVE_MUTEX_INITIALIZER
+# else
+# define gl_recursive_lock_initializer \
+ PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
+# endif
+# define glthread_recursive_lock_init(LOCK) \
+ (pthread_in_use () ? glthread_recursive_lock_init_multithreaded (LOCK) : 0)
+# define glthread_recursive_lock_lock(LOCK) \
+ (pthread_in_use () ? pthread_mutex_lock (LOCK) : 0)
+# define glthread_recursive_lock_unlock(LOCK) \
+ (pthread_in_use () ? pthread_mutex_unlock (LOCK) : 0)
+# define glthread_recursive_lock_destroy(LOCK) \
+ (pthread_in_use () ? pthread_mutex_destroy (LOCK) : 0)
+extern int glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock);
+
+# else
+
+typedef struct
+ {
+ pthread_mutex_t recmutex; /* recursive mutex */
+ pthread_mutex_t guard; /* protects the initialization */
+ int initialized;
+ }
+ gl_recursive_lock_t;
+# define gl_recursive_lock_define(STORAGECLASS, NAME) \
+ STORAGECLASS gl_recursive_lock_t NAME;
+# define gl_recursive_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS gl_recursive_lock_t NAME = gl_recursive_lock_initializer;
+# define gl_recursive_lock_initializer \
+ { PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, 0 }
+# define glthread_recursive_lock_init(LOCK) \
+ (pthread_in_use () ? glthread_recursive_lock_init_multithreaded (LOCK) : 0)
+# define glthread_recursive_lock_lock(LOCK) \
+ (pthread_in_use () ? glthread_recursive_lock_lock_multithreaded (LOCK) : 0)
+# define glthread_recursive_lock_unlock(LOCK) \
+ (pthread_in_use () ? glthread_recursive_lock_unlock_multithreaded (LOCK) : 0)
+# define glthread_recursive_lock_destroy(LOCK) \
+ (pthread_in_use () ? glthread_recursive_lock_destroy_multithreaded (LOCK) : 0)
+extern int glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock);
+extern int glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock);
+extern int glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock);
+extern int glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock);
+
+# endif
+
+# else
+
+/* Old versions of POSIX threads on Solaris did not have recursive locks.
+ We have to implement them ourselves. */
+
+typedef struct
+ {
+ pthread_mutex_t mutex;
+ pthread_t owner;
+ unsigned long depth;
+ }
+ gl_recursive_lock_t;
+# define gl_recursive_lock_define(STORAGECLASS, NAME) \
+ STORAGECLASS gl_recursive_lock_t NAME;
+# define gl_recursive_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS gl_recursive_lock_t NAME = gl_recursive_lock_initializer;
+# define gl_recursive_lock_initializer \
+ { PTHREAD_MUTEX_INITIALIZER, (pthread_t) 0, 0 }
+# define glthread_recursive_lock_init(LOCK) \
+ (pthread_in_use () ? glthread_recursive_lock_init_multithreaded (LOCK) : 0)
+# define glthread_recursive_lock_lock(LOCK) \
+ (pthread_in_use () ? glthread_recursive_lock_lock_multithreaded (LOCK) : 0)
+# define glthread_recursive_lock_unlock(LOCK) \
+ (pthread_in_use () ? glthread_recursive_lock_unlock_multithreaded (LOCK) : 0)
+# define glthread_recursive_lock_destroy(LOCK) \
+ (pthread_in_use () ? glthread_recursive_lock_destroy_multithreaded (LOCK) : 0)
+extern int glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock);
+extern int glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock);
+extern int glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock);
+extern int glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock);
+
+# endif
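
A minimal sketch of nested acquisition with the recursive lock macros documented at the top of this file (not part of the patch; the demo_* names are hypothetical). Every lock call must be balanced by an unlock:

#include "glthread/lock.h"

gl_recursive_lock_define_initialized (static, demo_reclock)

static void
demo_inner (void)
{
  gl_recursive_lock_lock (demo_reclock);    /* same thread: depth becomes 2 */
  /* ... */
  gl_recursive_lock_unlock (demo_reclock);  /* depth back to 1 */
}

static void
demo_outer (void)
{
  gl_recursive_lock_lock (demo_reclock);    /* depth 1 */
  demo_inner ();                            /* re-entering does not deadlock */
  gl_recursive_lock_unlock (demo_reclock);  /* depth 0: lock released */
}
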
+
+/* -------------------------- gl_once_t datatype -------------------------- */
+
+typedef pthread_once_t gl_once_t;
+# define gl_once_define(STORAGECLASS, NAME) \
+ STORAGECLASS pthread_once_t NAME = PTHREAD_ONCE_INIT;
+# define glthread_once(ONCE_CONTROL, INITFUNCTION) \
+ (pthread_in_use () \
+ ? pthread_once (ONCE_CONTROL, INITFUNCTION) \
+ : (glthread_once_singlethreaded (ONCE_CONTROL) ? (INITFUNCTION (), 0) : 0))
+extern int glthread_once_singlethreaded (pthread_once_t *once_control);
+
+# ifdef __cplusplus
+}
+# endif
+
+#endif
+
+/* ========================================================================= */
+
+#if USE_PTH_THREADS
+
+/* Use the GNU Pth threads library. */
+
+# include <pth.h>
+
+# ifdef __cplusplus
+extern "C" {
+# endif
+
+# if USE_PTH_THREADS_WEAK
+
+/* Use weak references to the GNU Pth threads library. */
+
+# pragma weak pth_mutex_init
+# pragma weak pth_mutex_acquire
+# pragma weak pth_mutex_release
+# pragma weak pth_rwlock_init
+# pragma weak pth_rwlock_acquire
+# pragma weak pth_rwlock_release
+# pragma weak pth_once
+# pragma weak pth_cond_init
+# pragma weak pth_cond_await
+# pragma weak pth_cond_notify
+
+# pragma weak pth_cancel
+# define pth_in_use() (pth_cancel != NULL)
+
+# else
+
+# define pth_in_use() 1
+
+# endif
+
+/* -------------------------- gl_lock_t datatype -------------------------- */
+
+typedef pth_mutex_t gl_lock_t;
+# define gl_lock_define(STORAGECLASS, NAME) \
+ STORAGECLASS pth_mutex_t NAME;
+# define gl_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS pth_mutex_t NAME = gl_lock_initializer;
+# define gl_lock_initializer \
+ PTH_MUTEX_INIT
+# define glthread_lock_init(LOCK) \
+ (pth_in_use () && !pth_mutex_init (LOCK) ? errno : 0)
+# define glthread_lock_lock(LOCK) \
+ (pth_in_use () && !pth_mutex_acquire (LOCK, 0, NULL) ? errno : 0)
+# define glthread_lock_unlock(LOCK) \
+ (pth_in_use () && !pth_mutex_release (LOCK) ? errno : 0)
+# define glthread_lock_destroy(LOCK) \
+ ((void)(LOCK), 0)
+
+/* ------------------------- gl_rwlock_t datatype ------------------------- */
+
+/* Pth pth_rwlock_acquire always prefers readers. No autoconf test so far. */
+# if HAVE_PTH_RWLOCK_ACQUIRE_PREFER_WRITER
+
+typedef pth_rwlock_t gl_rwlock_t;
+# define gl_rwlock_define(STORAGECLASS, NAME) \
+ STORAGECLASS pth_rwlock_t NAME;
+# define gl_rwlock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS pth_rwlock_t NAME = gl_rwlock_initializer;
+# define gl_rwlock_initializer \
+ PTH_RWLOCK_INIT
+# define glthread_rwlock_init(LOCK) \
+ (pth_in_use () && !pth_rwlock_init (LOCK) ? errno : 0)
+# define glthread_rwlock_rdlock(LOCK) \
+ (pth_in_use () && !pth_rwlock_acquire (LOCK, PTH_RWLOCK_RD, 0, NULL) ? errno : 0)
+# define glthread_rwlock_wrlock(LOCK) \
+ (pth_in_use () && !pth_rwlock_acquire (LOCK, PTH_RWLOCK_RW, 0, NULL) ? errno : 0)
+# define glthread_rwlock_unlock(LOCK) \
+ (pth_in_use () && !pth_rwlock_release (LOCK) ? errno : 0)
+# define glthread_rwlock_destroy(LOCK) \
+ ((void)(LOCK), 0)
+
+# else
+
+typedef struct
+ {
+ int initialized;
+ pth_mutex_t lock; /* protects the remaining fields */
+ pth_cond_t waiting_readers; /* waiting readers */
+ pth_cond_t waiting_writers; /* waiting writers */
+ unsigned int waiting_writers_count; /* number of waiting writers */
+ int runcount; /* number of readers running, or -1 when a writer runs */
+ }
+ gl_rwlock_t;
+# define gl_rwlock_define(STORAGECLASS, NAME) \
+ STORAGECLASS gl_rwlock_t NAME;
+# define gl_rwlock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS gl_rwlock_t NAME = gl_rwlock_initializer;
+# define gl_rwlock_initializer \
+ { 0 }
+# define glthread_rwlock_init(LOCK) \
+ (pth_in_use () ? glthread_rwlock_init_multithreaded (LOCK) : 0)
+# define glthread_rwlock_rdlock(LOCK) \
+ (pth_in_use () ? glthread_rwlock_rdlock_multithreaded (LOCK) : 0)
+# define glthread_rwlock_wrlock(LOCK) \
+ (pth_in_use () ? glthread_rwlock_wrlock_multithreaded (LOCK) : 0)
+# define glthread_rwlock_unlock(LOCK) \
+ (pth_in_use () ? glthread_rwlock_unlock_multithreaded (LOCK) : 0)
+# define glthread_rwlock_destroy(LOCK) \
+ (pth_in_use () ? glthread_rwlock_destroy_multithreaded (LOCK) : 0)
+extern int glthread_rwlock_init_multithreaded (gl_rwlock_t *lock);
+extern int glthread_rwlock_rdlock_multithreaded (gl_rwlock_t *lock);
+extern int glthread_rwlock_wrlock_multithreaded (gl_rwlock_t *lock);
+extern int glthread_rwlock_unlock_multithreaded (gl_rwlock_t *lock);
+extern int glthread_rwlock_destroy_multithreaded (gl_rwlock_t *lock);
+
+# endif
+
+/* --------------------- gl_recursive_lock_t datatype --------------------- */
+
+/* In Pth, mutexes are recursive by default. */
+typedef pth_mutex_t gl_recursive_lock_t;
+# define gl_recursive_lock_define(STORAGECLASS, NAME) \
+ STORAGECLASS pth_mutex_t NAME;
+# define gl_recursive_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS pth_mutex_t NAME = gl_recursive_lock_initializer;
+# define gl_recursive_lock_initializer \
+ PTH_MUTEX_INIT
+# define glthread_recursive_lock_init(LOCK) \
+ (pth_in_use () && !pth_mutex_init (LOCK) ? errno : 0)
+# define glthread_recursive_lock_lock(LOCK) \
+ (pth_in_use () && !pth_mutex_acquire (LOCK, 0, NULL) ? errno : 0)
+# define glthread_recursive_lock_unlock(LOCK) \
+ (pth_in_use () && !pth_mutex_release (LOCK) ? errno : 0)
+# define glthread_recursive_lock_destroy(LOCK) \
+ ((void)(LOCK), 0)
+
+/* -------------------------- gl_once_t datatype -------------------------- */
+
+typedef pth_once_t gl_once_t;
+# define gl_once_define(STORAGECLASS, NAME) \
+ STORAGECLASS pth_once_t NAME = PTH_ONCE_INIT;
+# define glthread_once(ONCE_CONTROL, INITFUNCTION) \
+ (pth_in_use () \
+ ? glthread_once_multithreaded (ONCE_CONTROL, INITFUNCTION) \
+ : (glthread_once_singlethreaded (ONCE_CONTROL) ? (INITFUNCTION (), 0) : 0))
+extern int glthread_once_multithreaded (pth_once_t *once_control, void (*initfunction) (void));
+extern int glthread_once_singlethreaded (pth_once_t *once_control);
+
+# ifdef __cplusplus
+}
+# endif
+
+#endif
+
+/* ========================================================================= */
+
+#if USE_SOLARIS_THREADS
+
+/* Use the old Solaris threads library. */
+
+# include <thread.h>
+# include <synch.h>
+
+# ifdef __cplusplus
+extern "C" {
+# endif
+
+# if USE_SOLARIS_THREADS_WEAK
+
+/* Use weak references to the old Solaris threads library. */
+
+# pragma weak mutex_init
+# pragma weak mutex_lock
+# pragma weak mutex_unlock
+# pragma weak mutex_destroy
+# pragma weak rwlock_init
+# pragma weak rw_rdlock
+# pragma weak rw_wrlock
+# pragma weak rw_unlock
+# pragma weak rwlock_destroy
+# pragma weak thr_self
+
+# pragma weak thr_suspend
+# define thread_in_use() (thr_suspend != NULL)
+
+# else
+
+# define thread_in_use() 1
+
+# endif
+
+/* -------------------------- gl_lock_t datatype -------------------------- */
+
+typedef mutex_t gl_lock_t;
+# define gl_lock_define(STORAGECLASS, NAME) \
+ STORAGECLASS mutex_t NAME;
+# define gl_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS mutex_t NAME = gl_lock_initializer;
+# define gl_lock_initializer \
+ DEFAULTMUTEX
+# define glthread_lock_init(LOCK) \
+ (thread_in_use () ? mutex_init (LOCK, USYNC_THREAD, NULL) : 0)
+# define glthread_lock_lock(LOCK) \
+ (thread_in_use () ? mutex_lock (LOCK) : 0)
+# define glthread_lock_unlock(LOCK) \
+ (thread_in_use () ? mutex_unlock (LOCK) : 0)
+# define glthread_lock_destroy(LOCK) \
+ (thread_in_use () ? mutex_destroy (LOCK) : 0)
+
+/* ------------------------- gl_rwlock_t datatype ------------------------- */
+
+typedef rwlock_t gl_rwlock_t;
+# define gl_rwlock_define(STORAGECLASS, NAME) \
+ STORAGECLASS rwlock_t NAME;
+# define gl_rwlock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS rwlock_t NAME = gl_rwlock_initializer;
+# define gl_rwlock_initializer \
+ DEFAULTRWLOCK
+# define glthread_rwlock_init(LOCK) \
+ (thread_in_use () ? rwlock_init (LOCK, USYNC_THREAD, NULL) : 0)
+# define glthread_rwlock_rdlock(LOCK) \
+ (thread_in_use () ? rw_rdlock (LOCK) : 0)
+# define glthread_rwlock_wrlock(LOCK) \
+ (thread_in_use () ? rw_wrlock (LOCK) : 0)
+# define glthread_rwlock_unlock(LOCK) \
+ (thread_in_use () ? rw_unlock (LOCK) : 0)
+# define glthread_rwlock_destroy(LOCK) \
+ (thread_in_use () ? rwlock_destroy (LOCK) : 0)
+
+/* --------------------- gl_recursive_lock_t datatype --------------------- */
+
+/* Old Solaris threads did not have recursive locks.
+ We have to implement them ourselves. */
+
+typedef struct
+ {
+ mutex_t mutex;
+ thread_t owner;
+ unsigned long depth;
+ }
+ gl_recursive_lock_t;
+# define gl_recursive_lock_define(STORAGECLASS, NAME) \
+ STORAGECLASS gl_recursive_lock_t NAME;
+# define gl_recursive_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS gl_recursive_lock_t NAME = gl_recursive_lock_initializer;
+# define gl_recursive_lock_initializer \
+ { DEFAULTMUTEX, (thread_t) 0, 0 }
+# define glthread_recursive_lock_init(LOCK) \
+ (thread_in_use () ? glthread_recursive_lock_init_multithreaded (LOCK) : 0)
+# define glthread_recursive_lock_lock(LOCK) \
+ (thread_in_use () ? glthread_recursive_lock_lock_multithreaded (LOCK) : 0)
+# define glthread_recursive_lock_unlock(LOCK) \
+ (thread_in_use () ? glthread_recursive_lock_unlock_multithreaded (LOCK) : 0)
+# define glthread_recursive_lock_destroy(LOCK) \
+ (thread_in_use () ? glthread_recursive_lock_destroy_multithreaded (LOCK) : 0)
+extern int glthread_recursive_lock_init_multithreaded (gl_recursive_lock_t *lock);
+extern int glthread_recursive_lock_lock_multithreaded (gl_recursive_lock_t *lock);
+extern int glthread_recursive_lock_unlock_multithreaded (gl_recursive_lock_t *lock);
+extern int glthread_recursive_lock_destroy_multithreaded (gl_recursive_lock_t *lock);
+
+/* -------------------------- gl_once_t datatype -------------------------- */
+
+typedef struct
+ {
+ volatile int inited;
+ mutex_t mutex;
+ }
+ gl_once_t;
+# define gl_once_define(STORAGECLASS, NAME) \
+ STORAGECLASS gl_once_t NAME = { 0, DEFAULTMUTEX };
+# define glthread_once(ONCE_CONTROL, INITFUNCTION) \
+ (thread_in_use () \
+ ? glthread_once_multithreaded (ONCE_CONTROL, INITFUNCTION) \
+ : (glthread_once_singlethreaded (ONCE_CONTROL) ? (INITFUNCTION (), 0) : 0))
+extern int glthread_once_multithreaded (gl_once_t *once_control, void (*initfunction) (void));
+extern int glthread_once_singlethreaded (gl_once_t *once_control);
+
+# ifdef __cplusplus
+}
+# endif
+
+#endif
+
+/* ========================================================================= */
+
+#if USE_WINDOWS_THREADS
+
+# define WIN32_LEAN_AND_MEAN /* avoid including junk */
+# include <windows.h>
+
+# ifdef __cplusplus
+extern "C" {
+# endif
+
+/* We can use CRITICAL_SECTION directly, rather than the native Windows Event,
+ Mutex, Semaphore types, because
+ - we need only to synchronize inside a single process (address space),
+ not inter-process locking,
+ - we don't need to support trylock operations. (TryEnterCriticalSection
+ does not work on Windows 95/98/ME. Packages that need trylock usually
+ define their own mutex type.) */
+
+/* There is no way to statically initialize a CRITICAL_SECTION. It needs
+ to be done lazily, once only. For this we need spinlocks. */
+
+typedef struct { volatile int done; volatile long started; } gl_spinlock_t;
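
A minimal sketch of the lazy one-time initialization this spinlock guard enables (not part of the patch; it mirrors glthread_lock_lock_func in lock.c, uses hypothetical demo_* names, and assumes <windows.h> is already included, as above). 'started' begins at -1, so InterlockedIncrement returns 0 exactly for the first thread, which performs the initialization; later threads spin on 'done':

static gl_spinlock_t demo_guard = { 0, -1 };
static CRITICAL_SECTION demo_cs;

static void
demo_enter (void)
{
  if (!demo_guard.done)
    {
      if (InterlockedIncrement (&demo_guard.started) == 0)
        {
          /* First thread to arrive: initialize, then publish. */
          InitializeCriticalSection (&demo_cs);
          demo_guard.done = 1;
        }
      else
        {
          /* Another thread is initializing; yield until it finishes. */
          while (!demo_guard.done)
            Sleep (0);
        }
    }
  EnterCriticalSection (&demo_cs);
}
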
+
+/* -------------------------- gl_lock_t datatype -------------------------- */
+
+typedef struct
+ {
+ gl_spinlock_t guard; /* protects the initialization */
+ CRITICAL_SECTION lock;
+ }
+ gl_lock_t;
+# define gl_lock_define(STORAGECLASS, NAME) \
+ STORAGECLASS gl_lock_t NAME;
+# define gl_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS gl_lock_t NAME = gl_lock_initializer;
+# define gl_lock_initializer \
+ { { 0, -1 } }
+# define glthread_lock_init(LOCK) \
+ (glthread_lock_init_func (LOCK), 0)
+# define glthread_lock_lock(LOCK) \
+ glthread_lock_lock_func (LOCK)
+# define glthread_lock_unlock(LOCK) \
+ glthread_lock_unlock_func (LOCK)
+# define glthread_lock_destroy(LOCK) \
+ glthread_lock_destroy_func (LOCK)
+extern void glthread_lock_init_func (gl_lock_t *lock);
+extern int glthread_lock_lock_func (gl_lock_t *lock);
+extern int glthread_lock_unlock_func (gl_lock_t *lock);
+extern int glthread_lock_destroy_func (gl_lock_t *lock);
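A sketch of how the gl_spinlock_t guard is meant to perform the lazy, once-only creation of the CRITICAL_SECTION (the general pattern behind the *_func routines in lock.c; error handling is omitted). Because started begins at -1, the first InterlockedIncrement returns 0 and elects exactly one initializing thread; every other thread yields the CPU until done is set.

static void
lazy_init_sketch (gl_lock_t *lock)
{
  if (!lock->guard.done)
    {
      if (InterlockedIncrement (&lock->guard.started) == 0)
        {
          /* This thread is the first one to need the lock: initialize it.  */
          InitializeCriticalSection (&lock->lock);
          lock->guard.done = 1;
        }
      else
        {
          /* Undo the increment so `started' cannot wrap around, then wait
             for the initializing thread to finish.  */
          InterlockedDecrement (&lock->guard.started);
          while (!lock->guard.done)
            Sleep (0);
        }
    }
  /* From here on, EnterCriticalSection (&lock->lock) is safe.  */
}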
+
+/* ------------------------- gl_rwlock_t datatype ------------------------- */
+
+/* It is impossible to implement read-write locks using plain locks without
+   introducing an extra thread dedicated to managing them.  Therefore we use
+   the low-level Event type here.  */
+
+typedef struct
+ {
+ HANDLE *array; /* array of waiting threads, each represented by an event */
+ unsigned int count; /* number of waiting threads */
+ unsigned int alloc; /* length of allocated array */
+ unsigned int offset; /* index of first waiting thread in array */
+ }
+ gl_carray_waitqueue_t;
+typedef struct
+ {
+ gl_spinlock_t guard; /* protects the initialization */
+ CRITICAL_SECTION lock; /* protects the remaining fields */
+ gl_carray_waitqueue_t waiting_readers; /* waiting readers */
+ gl_carray_waitqueue_t waiting_writers; /* waiting writers */
+ int runcount; /* number of readers running, or -1 when a writer runs */
+ }
+ gl_rwlock_t;
+# define gl_rwlock_define(STORAGECLASS, NAME) \
+ STORAGECLASS gl_rwlock_t NAME;
+# define gl_rwlock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS gl_rwlock_t NAME = gl_rwlock_initializer;
+# define gl_rwlock_initializer \
+ { { 0, -1 } }
+# define glthread_rwlock_init(LOCK) \
+ (glthread_rwlock_init_func (LOCK), 0)
+# define glthread_rwlock_rdlock(LOCK) \
+ glthread_rwlock_rdlock_func (LOCK)
+# define glthread_rwlock_wrlock(LOCK) \
+ glthread_rwlock_wrlock_func (LOCK)
+# define glthread_rwlock_unlock(LOCK) \
+ glthread_rwlock_unlock_func (LOCK)
+# define glthread_rwlock_destroy(LOCK) \
+ glthread_rwlock_destroy_func (LOCK)
+extern void glthread_rwlock_init_func (gl_rwlock_t *lock);
+extern int glthread_rwlock_rdlock_func (gl_rwlock_t *lock);
+extern int glthread_rwlock_wrlock_func (gl_rwlock_t *lock);
+extern int glthread_rwlock_unlock_func (gl_rwlock_t *lock);
+extern int glthread_rwlock_destroy_func (gl_rwlock_t *lock);
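A deliberately simplified sketch of the runcount protocol encoded in the structure above: runcount > 0 means that many readers hold the lock, runcount == -1 means a single writer holds it, and readers step aside while writers are queued so that writers are not starved. The real code does not poll; it parks waiters on Event handles in the waiting_readers/waiting_writers queues and wakes them explicitly, and it also performs the guard initialization, both of which are omitted here.

static void
rdlock_sketch (gl_rwlock_t *lock)
{
  for (;;)
    {
      EnterCriticalSection (&lock->lock);
      if (lock->runcount >= 0 && lock->waiting_writers.count == 0)
        {
          lock->runcount++;             /* one more running reader */
          LeaveCriticalSection (&lock->lock);
          return;
        }
      LeaveCriticalSection (&lock->lock);
      Sleep (0);                        /* stands in for waiting on an event */
    }
}

static void
wrlock_sketch (gl_rwlock_t *lock)
{
  for (;;)
    {
      EnterCriticalSection (&lock->lock);
      if (lock->runcount == 0)          /* no readers and no writer running */
        {
          lock->runcount = -1;
          LeaveCriticalSection (&lock->lock);
          return;
        }
      LeaveCriticalSection (&lock->lock);
      Sleep (0);
    }
}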
+
+/* --------------------- gl_recursive_lock_t datatype --------------------- */
+
+/* The native Windows documentation says that CRITICAL_SECTION already
+ implements a recursive lock. But we need not rely on it: It's easy to
+ implement a recursive lock without this assumption. */
+
+typedef struct
+ {
+ gl_spinlock_t guard; /* protects the initialization */
+ DWORD owner;
+ unsigned long depth;
+ CRITICAL_SECTION lock;
+ }
+ gl_recursive_lock_t;
+# define gl_recursive_lock_define(STORAGECLASS, NAME) \
+ STORAGECLASS gl_recursive_lock_t NAME;
+# define gl_recursive_lock_define_initialized(STORAGECLASS, NAME) \
+ STORAGECLASS gl_recursive_lock_t NAME = gl_recursive_lock_initializer;
+# define gl_recursive_lock_initializer \
+ { { 0, -1 }, 0, 0 }
+# define glthread_recursive_lock_init(LOCK) \
+ (glthread_recursive_lock_init_func (LOCK), 0)
+# define glthread_recursive_lock_lock(LOCK) \
+ glthread_recursive_lock_lock_func (LOCK)
+# define glthread_recursive_lock_unlock(LOCK) \
+ glthread_recursive_lock_unlock_func (LOCK)
+# define glthread_recursive_lock_destroy(LOCK) \
+ glthread_recursive_lock_destroy_func (LOCK)
+extern void glthread_recursive_lock_init_func (gl_recursive_lock_t *lock);
+extern int glthread_recursive_lock_lock_func (gl_recursive_lock_t *lock);
+extern int glthread_recursive_lock_unlock_func (gl_recursive_lock_t *lock);
+extern int glthread_recursive_lock_destroy_func (gl_recursive_lock_t *lock);
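A sketch of the matching unlock path for this structure (the lock path mirrors the Solaris recursive-lock sketch above, with GetCurrentThreadId () in place of thr_self ()); EPERM and EINVAL come from <errno.h> and signal unbalanced use:

static int
recursive_unlock_sketch (gl_recursive_lock_t *lock)
{
  if (lock->owner != GetCurrentThreadId ())
    return EPERM;               /* the calling thread does not hold the lock */
  if (lock->depth == 0)
    return EINVAL;              /* more unlocks than locks */
  if (--(lock->depth) == 0)
    {
      /* Leaving the outermost recursion level: actually release the lock.  */
      lock->owner = 0;
      LeaveCriticalSection (&lock->lock);
    }
  return 0;
}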
+
+/* -------------------------- gl_once_t datatype -------------------------- */
+
+typedef struct
+ {
+ volatile int inited;
+ volatile long started;
+ CRITICAL_SECTION lock;
+ }
+ gl_once_t;
+# define gl_once_define(STORAGECLASS, NAME) \
+ STORAGECLASS gl_once_t NAME = { -1, -1 };
+# define glthread_once(ONCE_CONTROL, INITFUNCTION) \
+ (glthread_once_func (ONCE_CONTROL, INITFUNCTION), 0)
+extern void glthread_once_func (gl_once_t *once_control, void (*initfunction) (void));
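A sketch of how the { -1, -1 } initializer above can be consumed: the first InterlockedIncrement on started returns 0 and selects the thread that runs the init function; latecomers spin until inited becomes non-negative and then briefly take the critical section, so they cannot return before initialization has finished. (The decrement that keeps started from growing and the final sanity checks are omitted in this sketch.)

static void
once_sketch_w32 (gl_once_t *once_control, void (*initfunction) (void))
{
  if (once_control->inited <= 0)
    {
      if (InterlockedIncrement (&once_control->started) == 0)
        {
          /* First arrival: create the lock, run the init function once.  */
          InitializeCriticalSection (&once_control->lock);
          EnterCriticalSection (&once_control->lock);
          once_control->inited = 0;
          initfunction ();
          once_control->inited = 1;
          LeaveCriticalSection (&once_control->lock);
        }
      else
        {
          /* Another thread is initializing: wait for it to take the lock,
             then synchronize on the critical section.  */
          while (once_control->inited < 0)
            Sleep (0);
          if (once_control->inited <= 0)
            {
              EnterCriticalSection (&once_control->lock);
              LeaveCriticalSection (&once_control->lock);
            }
        }
    }
}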
+
+# ifdef __cplusplus
+}
+# endif
+
+#endif
+
+/* ========================================================================= */
+
+#if !(USE_POSIX_THREADS || USE_PTH_THREADS || USE_SOLARIS_THREADS || USE_WINDOWS_THREADS)
+
+/* Provide a dummy (no-op) implementation when threads are not supported.  */
+
+/* -------------------------- gl_lock_t datatype -------------------------- */
+
+typedef int gl_lock_t;
+# define gl_lock_define(STORAGECLASS, NAME)
+# define gl_lock_define_initialized(STORAGECLASS, NAME)
+# define glthread_lock_init(NAME) 0
+# define glthread_lock_lock(NAME) 0
+# define glthread_lock_unlock(NAME) 0
+# define glthread_lock_destroy(NAME) 0
+
+/* ------------------------- gl_rwlock_t datatype ------------------------- */
+
+typedef int gl_rwlock_t;
+# define gl_rwlock_define(STORAGECLASS, NAME)
+# define gl_rwlock_define_initialized(STORAGECLASS, NAME)
+# define glthread_rwlock_init(NAME) 0
+# define glthread_rwlock_rdlock(NAME) 0
+# define glthread_rwlock_wrlock(NAME) 0
+# define glthread_rwlock_unlock(NAME) 0
+# define glthread_rwlock_destroy(NAME) 0
+
+/* --------------------- gl_recursive_lock_t datatype --------------------- */
+
+typedef int gl_recursive_lock_t;
+# define gl_recursive_lock_define(STORAGECLASS, NAME)
+# define gl_recursive_lock_define_initialized(STORAGECLASS, NAME)
+# define glthread_recursive_lock_init(NAME) 0
+# define glthread_recursive_lock_lock(NAME) 0
+# define glthread_recursive_lock_unlock(NAME) 0
+# define glthread_recursive_lock_destroy(NAME) 0
+
+/* -------------------------- gl_once_t datatype -------------------------- */
+
+typedef int gl_once_t;
+# define gl_once_define(STORAGECLASS, NAME) \
+ STORAGECLASS gl_once_t NAME = 0;
+# define glthread_once(ONCE_CONTROL, INITFUNCTION) \
+ (*(ONCE_CONTROL) == 0 ? (*(ONCE_CONTROL) = ~ 0, INITFUNCTION (), 0) : 0)
+
+#endif
+
+/* ========================================================================= */
+
+/* Macros with built-in error handling. */
+
+/* -------------------------- gl_lock_t datatype -------------------------- */
+
+#define gl_lock_init(NAME) \
+ do \
+ { \
+ if (glthread_lock_init (&NAME)) \
+ abort (); \
+ } \
+ while (0)
+#define gl_lock_lock(NAME) \
+ do \
+ { \
+ if (glthread_lock_lock (&NAME)) \
+ abort (); \
+ } \
+ while (0)
+#define gl_lock_unlock(NAME) \
+ do \
+ { \
+ if (glthread_lock_unlock (&NAME)) \
+ abort (); \
+ } \
+ while (0)
+#define gl_lock_destroy(NAME) \
+ do \
+ { \
+ if (glthread_lock_destroy (&NAME)) \
+ abort (); \
+ } \
+ while (0)
+
+/* ------------------------- gl_rwlock_t datatype ------------------------- */
+
+#define gl_rwlock_init(NAME) \
+ do \
+ { \
+ if (glthread_rwlock_init (&NAME)) \
+ abort (); \
+ } \
+ while (0)
+#define gl_rwlock_rdlock(NAME) \
+ do \
+ { \
+ if (glthread_rwlock_rdlock (&NAME)) \
+ abort (); \
+ } \
+ while (0)
+#define gl_rwlock_wrlock(NAME) \
+ do \
+ { \
+ if (glthread_rwlock_wrlock (&NAME)) \
+ abort (); \
+ } \
+ while (0)
+#define gl_rwlock_unlock(NAME) \
+ do \
+ { \
+ if (glthread_rwlock_unlock (&NAME)) \
+ abort (); \
+ } \
+ while (0)
+#define gl_rwlock_destroy(NAME) \
+ do \
+ { \
+ if (glthread_rwlock_destroy (&NAME)) \
+ abort (); \
+ } \
+ while (0)
+
+/* --------------------- gl_recursive_lock_t datatype --------------------- */
+
+#define gl_recursive_lock_init(NAME) \
+ do \
+ { \
+ if (glthread_recursive_lock_init (&NAME)) \
+ abort (); \
+ } \
+ while (0)
+#define gl_recursive_lock_lock(NAME) \
+ do \
+ { \
+ if (glthread_recursive_lock_lock (&NAME)) \
+ abort (); \
+ } \
+ while (0)
+#define gl_recursive_lock_unlock(NAME) \
+ do \
+ { \
+ if (glthread_recursive_lock_unlock (&NAME)) \
+ abort (); \
+ } \
+ while (0)
+#define gl_recursive_lock_destroy(NAME) \
+ do \
+ { \
+ if (glthread_recursive_lock_destroy (&NAME)) \
+ abort (); \
+ } \
+ while (0)
+
+/* -------------------------- gl_once_t datatype -------------------------- */
+
+#define gl_once(NAME, INITFUNCTION) \
+ do \
+ { \
+ if (glthread_once (&NAME, INITFUNCTION)) \
+ abort (); \
+ } \
+ while (0)
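A hypothetical usage example (counter_lock, next_id, table_once, init_table and use_table are illustrative names, not part of this header): the gl_* wrappers abort () on failure, so callers need no error checking, and with the dummy single-threaded configuration above they compile down to almost nothing.

gl_lock_define_initialized (static, counter_lock)
static int counter;

static int
next_id (void)
{
  int id;

  gl_lock_lock (counter_lock);
  id = ++counter;
  gl_lock_unlock (counter_lock);
  return id;
}

gl_once_define (static, table_once)
static int table_ready;

static void
init_table (void)
{
  table_ready = 1;              /* stands in for real one-time setup */
}

static void
use_table (void)
{
  gl_once (table_once, init_table);
  /* table_ready is guaranteed to be 1 here, in every configuration.  */
}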
+
+/* ========================================================================= */
+
+#endif /* _LOCK_H */
diff --git a/grub-core/lib/gnulib/glthread/threadlib.c b/grub-core/lib/gnulib/glthread/threadlib.c
new file mode 100644
index 0000000..a5ebd9b
--- /dev/null
+++ b/grub-core/lib/gnulib/glthread/threadlib.c
@@ -0,0 +1,73 @@
+/* Multithreading primitives.
+ Copyright (C) 2005-2019 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <https://www.gnu.org/licenses/>. */
+
+/* Written by Bruno Haible <bruno@clisp.org>, 2005. */
+
+#include <config.h>
+
+/* ========================================================================= */
+
+#if USE_POSIX_THREADS
+
+/* Use the POSIX threads library. */
+
+# include <pthread.h>
+# include <stdlib.h>
+
+# if PTHREAD_IN_USE_DETECTION_HARD
+
+/* The function to be executed by a dummy thread. */
+static void *
+dummy_thread_func (void *arg)
+{
+ return arg;
+}
+
+int
+glthread_in_use (void)
+{
+ static int tested;
+ static int result; /* 1: linked with -lpthread, 0: only with libc */
+
+ if (!tested)
+ {
+ pthread_t thread;
+
+ if (pthread_create (&thread, NULL, dummy_thread_func, NULL) != 0)
+ /* Thread creation failed. */
+ result = 0;
+ else
+ {
+ /* Thread creation works. */
+ void *retval;
+ if (pthread_join (thread, &retval) != 0)
+ abort ();
+ result = 1;
+ }
+ tested = 1;
+ }
+ return result;
+}
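A sketch of how this result is typically consumed (hypothetical caller; in lock.h the check is wrapped in a pthread_in_use ()-style macro): when the program never actually created a thread, the locking primitives skip the pthread calls entirely.

static int
guarded_lock (pthread_mutex_t *mutex)
{
  /* Take the mutex only when the program is really multithreaded.  */
  return glthread_in_use () ? pthread_mutex_lock (mutex) : 0;
}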
+
+# endif
+
+#endif
+
+/* ========================================================================= */
+
+/* This declaration exists solely to ensure that this file is never empty
+   after preprocessing.  */
+typedef int dummy;