/*
* Copyright 2000, International Business Machines Corporation and others.
* All Rights Reserved.
- *
+ *
* This software has been released under the terms of the IBM Public
* License. For details, see the LICENSE file in the top-level source
* directory or online at http://www.openafs.org/dl/license10.html
* The purpose of this file is to only implement those functions that
* are truly needed to support the afs code base.
*
- * A secondary goal is to allow a "real" pthread implementation to
+ * A secondary goal is to allow a "real" pthread implementation to
* replace this file without any modification to code that depends upon
* this file
*
* as their UNIX prototypes.
* Where possible, the POSIX specified return values are used.
* For situations where an error can occur, but no corresponding
- * POSIX error value exists, unique (within a given function) negative
+ * POSIX error value exists, unique (within a given function) negative
* numbers are used for errors to avoid collisions with the errno
* style values.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
+#include <process.h>
#include <errno.h>
#include <sys/timeb.h>
int rc = 0;
if ((mp != NULL) && (attr == NULL)) {
+ memset(mp, 0, sizeof(*mp));
InitializeCriticalSection(&mp->cs);
mp->isLocked = 0;
mp->tid = 0;
if (mp->isLocked) {
/* same thread tried to recursively lock, fail */
LeaveCriticalSection(&mp->cs);
- rc = EBUSY;
+ rc = EDEADLK;
} else {
mp->isLocked = 1;
mp->tid = GetCurrentThreadId();
mp->isLocked = 1;
mp->tid = GetCurrentThreadId();
} else {
- /*
+ /*
* same thread tried to recursively lock this mutex.
- * Under real POSIX, this would cause a deadlock, but NT only
+ * Under real POSIX, this would cause a deadlock, but NT only
* supports recursive mutexes so we indicate the situation
* by returning EDEADLK.
*/
LeaveCriticalSection(&mp->cs);
rc = EDEADLK;
+#ifdef PTHREAD_DEBUG
+ DebugBreak();
+#endif
}
} else {
+#ifdef PTHREAD_DEBUG
+ DebugBreak();
+#endif
rc = EINVAL;
}
-
+
return rc;
}
mp->tid = 0;
LeaveCriticalSection(&mp->cs);
} else {
- rc = 0;
+#ifdef PTHREAD_DEBUG
+ DebugBreak();
+#endif
+ rc = EPERM;
}
} else {
+#ifdef PTHREAD_DEBUG
+ DebugBreak();
+#endif
rc = EINVAL;
}
return rc;
if (mp != NULL) {
DeleteCriticalSection(&mp->cs);
} else {
+#ifdef PTHREAD_DEBUG
+ DebugBreak();
+#endif
rc = EINVAL;
}
return rc;
}
+/*
+ * pthread_rwlock_destroy() -- tear down the sync objects that back an
+ * emulated read/write lock: both internal mutexes and the
+ * "all readers done" condition variable.
+ *
+ * Returns 0 on success, EINVAL if rwp is NULL.  As with POSIX,
+ * destroying a lock that is still held is undefined behavior.
+ */
+int pthread_rwlock_destroy(pthread_rwlock_t *rwp)
+{
+ int rc = 0;
+
+ if (rwp != NULL) {
+ /* NOTE(review): the destroy return codes are discarded here --
+  * presumably acceptable because callers only destroy idle locks;
+  * verify against call sites. */
+ pthread_mutex_destroy(&rwp->read_access_completion_mutex);
+ pthread_mutex_destroy(&rwp->write_access_mutex);
+ pthread_cond_destroy(&rwp->read_access_completion_wait);
+ } else {
+#ifdef PTHREAD_DEBUG
+ DebugBreak();
+#endif
+ rc = EINVAL;
+ }
+
+ return rc;
+}
+
+/*
+ * pthread_rwlock_init() -- initialize an emulated read/write lock.
+ * The lock is built from a writer-exclusion mutex, a mutex guarding
+ * the reader count, and a condition variable signalled when the last
+ * reader leaves.
+ *
+ * The attr parameter is accepted for API compatibility but ignored.
+ * Returns 0 on success, EINVAL for a NULL rwp, or the error from the
+ * first underlying init that fails; partially-created sync objects
+ * are unwound via the goto-cleanup chain below.
+ */
+int pthread_rwlock_init(pthread_rwlock_t *rwp, const pthread_rwlockattr_t *attr)
+{
+ int rc = 0;
+
+ if (rwp == NULL)
+ return EINVAL;
+
+ rwp->readers = 0;
+
+ rc = pthread_mutex_init(&rwp->write_access_mutex, NULL);
+ if (rc)
+ return rc;
+
+ rc = pthread_mutex_init(&rwp->read_access_completion_mutex, NULL);
+ if (rc)
+ goto error1;
+
+ rc = pthread_cond_init(&rwp->read_access_completion_wait, NULL);
+ if (rc == 0)
+ return 0; /* success */
+
+ /* cond init failed: unwind in reverse order of creation */
+ pthread_mutex_destroy(&rwp->read_access_completion_mutex);
+
+ error1:
+ pthread_mutex_destroy(&rwp->write_access_mutex);
+
+ return rc;
+}
+
+/*
+ * pthread_rwlock_wrlock() -- acquire rwp for exclusive (write) access.
+ *
+ * Takes write_access_mutex (which excludes other writers AND blocks
+ * new readers, since rdlock also passes through it), then waits on
+ * read_access_completion_wait until the readers already inside have
+ * drained to zero.  On success the caller holds write_access_mutex;
+ * it is released by pthread_rwlock_unlock().
+ *
+ * Returns 0 on success, EINVAL for a NULL rwp, or the underlying
+ * mutex/cond error; on any failure no lock is left held.
+ */
+int pthread_rwlock_wrlock(pthread_rwlock_t *rwp)
+{
+ int rc = 0;
+
+ if (rwp == NULL)
+ return EINVAL;
+
+ if ((rc = pthread_mutex_lock(&rwp->write_access_mutex)) != 0)
+ return rc;
+
+ if ((rc = pthread_mutex_lock(&rwp->read_access_completion_mutex)) != 0)
+ {
+ pthread_mutex_unlock(&rwp->write_access_mutex);
+ return rc;
+ }
+
+ /* wait for in-flight readers to finish; the last one out
+  * broadcasts read_access_completion_wait in unlock() */
+ while (rc == 0 && rwp->readers > 0) {
+ rc = pthread_cond_wait( &rwp->read_access_completion_wait,
+ &rwp->read_access_completion_mutex);
+ }
+
+ pthread_mutex_unlock(&rwp->read_access_completion_mutex);
+
+ /* on cond_wait failure, give up the write mutex so we fail clean */
+ if (rc)
+ pthread_mutex_unlock(&rwp->write_access_mutex);
+
+ return rc;
+}
+
+/*
+ * pthread_rwlock_rdlock() -- acquire rwp for shared (read) access.
+ *
+ * Briefly passes through write_access_mutex so a new reader cannot
+ * slip in while a writer holds (or is waiting on) the lock, bumps the
+ * reader count under read_access_completion_mutex, then releases both
+ * internal mutexes -- only the incremented count records the read hold.
+ *
+ * Returns 0 on success, EINVAL for a NULL rwp, or the underlying
+ * mutex error (no state is changed on failure).
+ */
+int pthread_rwlock_rdlock(pthread_rwlock_t *rwp)
+{
+ int rc = 0;
+
+ if (rwp == NULL)
+ return EINVAL;
+
+ if ((rc = pthread_mutex_lock(&rwp->write_access_mutex)) != 0)
+ return rc;
+
+ if ((rc = pthread_mutex_lock(&rwp->read_access_completion_mutex)) != 0)
+ {
+ pthread_mutex_unlock(&rwp->write_access_mutex);
+ return rc;
+ }
+
+ /* the count is the only record of this read hold */
+ rwp->readers++;
+
+ pthread_mutex_unlock(&rwp->read_access_completion_mutex);
+
+ pthread_mutex_unlock(&rwp->write_access_mutex);
+
+ return rc;
+
+}
+
+/*
+ * pthread_rwlock_tryrdlock() -- non-blocking variant of rdlock.
+ *
+ * Identical to pthread_rwlock_rdlock() except both internal mutexes
+ * are acquired with trylock, so if a writer currently holds (or is
+ * draining readers under) the lock, the trylock's error is returned
+ * immediately instead of blocking.
+ *
+ * Returns 0 on success, EINVAL for a NULL rwp, or the trylock error.
+ */
+int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwp)
+{
+ int rc = 0;
+
+ if (rwp == NULL)
+ return EINVAL;
+
+ if ((rc = pthread_mutex_trylock(&rwp->write_access_mutex)) != 0)
+ return rc;
+
+ if ((rc = pthread_mutex_trylock(&rwp->read_access_completion_mutex)) != 0) {
+ pthread_mutex_unlock(&rwp->write_access_mutex);
+ return rc;
+ }
+
+ rwp->readers++;
+
+ pthread_mutex_unlock(&rwp->read_access_completion_mutex);
+
+ pthread_mutex_unlock(&rwp->write_access_mutex);
+
+ return rc;
+}
+
+/*
+ * pthread_rwlock_trywrlock() -- non-blocking variant of wrlock.
+ *
+ * Trylocks write_access_mutex; if active readers remain, fails with
+ * EBUSY instead of waiting on the condition variable.  On success the
+ * caller holds write_access_mutex (released by pthread_rwlock_unlock);
+ * on any failure no lock is left held.
+ *
+ * Returns 0 on success, EINVAL for a NULL rwp, EBUSY if readers are
+ * active, or the underlying trylock error.
+ */
+int pthread_rwlock_trywrlock(pthread_rwlock_t *rwp)
+{
+ int rc = 0;
+
+ if (rwp == NULL)
+ return EINVAL;
+
+ if ((rc = pthread_mutex_trylock(&rwp->write_access_mutex)) != 0)
+ return rc;
+
+ if ((rc = pthread_mutex_trylock(&rwp->read_access_completion_mutex)) != 0)
+ {
+ pthread_mutex_unlock(&rwp->write_access_mutex);
+ return rc;
+ }
+
+ /* readers present: a writer would have to wait, so report busy */
+ if (rwp->readers > 0)
+ rc = EBUSY;
+
+ pthread_mutex_unlock(&rwp->read_access_completion_mutex);
+
+ if (rc)
+ pthread_mutex_unlock(&rwp->write_access_mutex);
+
+ return rc;
+}
+
+/*
+ * pthread_rwlock_unlock() -- release a read or a write hold on rwp.
+ *
+ * The kind of hold is inferred rather than recorded: trylocking
+ * write_access_mutex yields EDEADLK in this emulation only when the
+ * calling thread already owns it, i.e. it holds a write lock.  Any
+ * other result means we are releasing a read hold, so the reader
+ * count is decremented and, when it reaches zero, waiting writers
+ * are woken via read_access_completion_wait.
+ *
+ * Returns 0 on success, EINVAL for a NULL rwp or if no read hold is
+ * recorded, or an underlying mutex error.
+ */
+int pthread_rwlock_unlock(pthread_rwlock_t *rwp)
+{
+ int rc = 0;
+
+ if (rwp == NULL)
+ return EINVAL;
+
+ rc = pthread_mutex_trylock(&rwp->write_access_mutex);
+ if (rc != EDEADLK)
+ {
+ /* unlock a read lock */
+ if (rc == 0)
+ pthread_mutex_unlock(&rwp->write_access_mutex);
+
+ if ((rc = pthread_mutex_lock(&rwp->read_access_completion_mutex)) != 0)
+ {
+ /* BUG FIX: write_access_mutex is NOT held here -- it was
+  * either released just above (trylock succeeded) or never
+  * acquired (trylock failed), so the former unlock of it on
+  * this path released a mutex this thread did not own. */
+ return rc;
+ }
+
+ if (rwp->readers <= 0)
+ {
+ rc = EINVAL;
+ }
+ else
+ {
+ /* last reader out wakes any writer parked in wrlock() */
+ if (--rwp->readers == 0)
+ pthread_cond_broadcast(&rwp->read_access_completion_wait);
+ }
+
+ pthread_mutex_unlock(&rwp->read_access_completion_mutex);
+ }
+ else
+ {
+ /* unlock a write lock */
+ rc = pthread_mutex_unlock(&rwp->write_access_mutex);
+ }
+
+ return rc;
+}
+
+
/*
* keys is used to keep track of which keys are currently
* in use by the threads library. pthread_tsd_mutex is used
pthread_cache_done = 1;
}
+/*
+ * cleanup_pthread_cache() -- process-detach teardown for the thread
+ * bookkeeping: empties the active and cached thread queues and
+ * destroys their guard mutexes, then re-arms pthread_cache_done so a
+ * later re-init is possible.
+ *
+ * NOTE(review): queue entries are removed but not freed -- presumably
+ * fine because this runs at DLL_PROCESS_DETACH when the heap is about
+ * to vanish; confirm no earlier caller exists.  Also assumes no other
+ * thread is still mutating the queues (DllMain calls are serialized).
+ */
+static void cleanup_pthread_cache(void) {
+ thread_p cur = NULL, next = NULL;
+
+ if (pthread_cache_done) {
+ for(queue_Scan(&active_Q, cur, next, thread)) {
+ queue_Remove(cur);
+ }
+ for(queue_Scan(&cache_Q, cur, next, thread)) {
+ queue_Remove(cur);
+ }
+
+ pthread_mutex_destroy(&active_Q_mutex);
+ pthread_mutex_destroy(&cache_Q_mutex);
+
+ pthread_cache_done = 0;
+ }
+}
+
static void put_thread(thread_p old) {
-
+
CloseHandle(old->t_handle);
pthread_mutex_lock(&cache_Q_mutex);
queue_Prepend(&cache_Q, old);
static thread_p get_thread() {
thread_p new = NULL;
-
+
pthread_mutex_lock(&cache_Q_mutex);
-
+
if (queue_IsEmpty(&cache_Q)) {
new = (thread_p) malloc(sizeof(thread_t));
if (new != NULL) {
new = queue_First(&cache_Q, thread);
queue_Remove(new);
}
-
+
pthread_mutex_unlock(&cache_Q_mutex);
- /*
+ /*
* Initialization done every time we hand out a thread_t
*/
new->has_been_joined = 0;
}
return new;
-
+
}
-
+
/*
* The thread start function signature is different on NT than the pthread
* spec so we create a tiny stub to map from one signature to the next.
} while(call_more_destructors);
}
+/*
+ * cleanup_global_tsd() -- process-detach teardown for thread-specific
+ * data: runs tsd_free_all() over every thread still on active_Q, then
+ * releases both TLS slots and resets them to the TLS_OUT_OF_INDEXES
+ * sentinel (0xFFFFFFFF) so stale indices cannot be reused.
+ *
+ * NOTE(review): active_Q is walked without taking active_Q_mutex --
+ * presumably safe only because DllMain(DLL_PROCESS_DETACH) calls are
+ * serialized by the loader; confirm this is the sole caller.
+ */
+static void cleanup_global_tsd(void)
+{
+ thread_p cur = NULL, next = NULL;
+
+ if (tsd_done) {
+ for(queue_Scan(&active_Q, cur, next, thread)) {
+ tsd_free_all(cur->tsd);
+ }
+
+ TlsFree(tsd_pthread_index);
+ tsd_pthread_index = 0xFFFFFFFF;
+ TlsFree(tsd_index);
+ tsd_index = 0xFFFFFFFF;
+ tsd_done = 0;
+ }
+}
+
static DWORD WINAPI afs_pthread_create_stub(LPVOID param) {
pthread_create_t *t = (pthread_create_t *) param;
void *rc;
- /*
+ /*
* Initialize thread specific storage structures.
*/
static DWORD WINAPI terminate_thread_routine(LPVOID param) {
thread_p cur, next;
- size_t native_thread_count;
+ DWORD native_thread_count;
int should_terminate;
int terminate_thread_wakeup_list_index;
* Here, we've decided to wait for native threads et al.
* Fill out the wakeup_list.
*/
- memset(terminate_thread_wakeup_list, 0x00, (sizeof(HANDLE) *
+ memset(terminate_thread_wakeup_list, 0x00, (sizeof(HANDLE) *
(1+native_thread_count)));
terminate_thread_wakeup_list[0] = terminate_thread_wakeup_event;
sprintf(eventName, "terminate_thread_wakeup_event %d::%d", _getpid(), eventCount++);
terminate_thread_wakeup_event = CreateEvent((LPSECURITY_ATTRIBUTES) 0,
TRUE, FALSE, (LPCTSTR) eventName);
- terminate_thread_handle = CreateThread((LPSECURITY_ATTRIBUTES) 0, 0,
- terminate_thread_routine, (LPVOID) 0, 0,
+ terminate_thread_handle = CreateThread((LPSECURITY_ATTRIBUTES) 0, 0,
+ terminate_thread_routine, (LPVOID) 0, 0,
&terminate_thread_id);
} else {
SetEvent (terminate_thread_wakeup_event);
*/
pthread_mutex_lock(&active_Q_mutex);
queue_Prepend(&active_Q, t->me);
- t->me->t_handle = CreateThread((LPSECURITY_ATTRIBUTES) 0, 0,
- afs_pthread_create_stub, (LPVOID) t, 0,
+ t->me->t_handle = CreateThread((LPSECURITY_ATTRIBUTES) 0, 0,
+ afs_pthread_create_stub, (LPVOID) t, 0,
&t->me->NT_id);
if (t->me->t_handle == 0) {
- /*
+ /*
* we only free t if the thread wasn't created, otherwise
* it's free'd by the new thread.
*/
* attr parameter.
*/
if ((attr == NULL) && (cond != NULL)) {
+ memset(cond, 0, sizeof(*cond));
InitializeCriticalSection(&cond->cs);
queue_Init(&cond->waiting_threads);
} else {
* once they have been created, they stay in the cache for the life
* of the process.
*/
-
+
static struct rx_queue waiter_cache;
static CRITICAL_SECTION waiter_cache_cs;
static int waiter_cache_init;
static pthread_once_t waiter_cache_once = PTHREAD_ONCE_INIT;
-
+
+/*
+ * init_waiter_cache() -- one-time setup of the condition-variable
+ * waiter cache.  Now idempotent: an early-out on waiter_cache_init
+ * allows DllMain to call it directly as well as via pthread_once.
+ * The init flag is set only after the critical section and queue are
+ * usable, so racing fast-path checks cannot see a half-built cache.
+ */
static void init_waiter_cache(void) {
+ if (waiter_cache_init)
+ return;
+
+ memset(&waiter_cache_cs, 0, sizeof(waiter_cache_cs));
    InitializeCriticalSection(&waiter_cache_cs);
- waiter_cache_init = 1;
    queue_Init(&waiter_cache);
+ waiter_cache_init = 1;
+}
+
+/*
+ * cleanup_waiter_cache() -- process-detach teardown: closes each
+ * cached waiter's event handle, frees the waiter, then deletes the
+ * cache critical section and clears the init flag so the cache could
+ * be re-initialized.
+ */
+static void cleanup_waiter_cache(void)
+{
+ cond_waiters_t * cur = NULL, * next = NULL;
+
+ if (waiter_cache_init) {
+ for(queue_Scan(&waiter_cache, cur, next, cond_waiter)) {
+ queue_Remove(cur);
+
+ CloseHandle(cur->event);
+ free(cur);
+ }
+
+ DeleteCriticalSection(&waiter_cache_cs);
+ waiter_cache_init = 0;
+ }
}
-
+
static cond_waiters_t *get_waiter() {
cond_waiters_t *new = NULL;
-
+
(waiter_cache_init || pthread_once(&waiter_cache_once, init_waiter_cache));
-
+
EnterCriticalSection(&waiter_cache_cs);
-
+
if (queue_IsEmpty(&waiter_cache)) {
new = (cond_waiters_t *) malloc(sizeof(cond_waiters_t));
if (new != NULL) {
new = queue_First(&waiter_cache, cond_waiter);
queue_Remove(new);
}
-
+
LeaveCriticalSection(&waiter_cache_cs);
return new;
-
+
}
-
+
static void put_waiter(cond_waiters_t *old) {
-
+
(waiter_cache_init || pthread_once(&waiter_cache_once, init_waiter_cache));
-
+
EnterCriticalSection(&waiter_cache_cs);
queue_Prepend(&waiter_cache, old);
LeaveCriticalSection(&waiter_cache_cs);
queue_Append(&cond->waiting_threads, my_entry);
LeaveCriticalSection(&cond->cs);
- if (!pthread_mutex_unlock(mutex)) {
+ if (pthread_mutex_unlock(mutex) == 0) {
switch(WaitForSingleObject(my_entry->event, time)) {
- case WAIT_FAILED:
- rc = -1;
- break;
- case WAIT_TIMEOUT:
- rc = ETIME;
- /*
- * This is a royal pain. We've timed out waiting
- * for the signal, but between the time out and here
- * it is possible that we were actually signalled by
- * another thread. So we grab the condition lock
- * and scan the waiting thread queue to see if we are
- * still there. If we are, we just remove ourselves.
- *
- * If we are no longer listed in the waiter queue,
- * it means that we were signalled after the time
- * out occurred and so we have to do another wait
- * WHICH HAS TO SUCCEED! In this case, we reset
- * rc to indicate that we were signalled.
- *
- * We have to wait or otherwise, the event
- * would be cached in the signalled state, which
- * is wrong. It might be more efficient to just
- * close and reopen the event.
- */
- EnterCriticalSection(&cond->cs);
- for(queue_Scan(&cond->waiting_threads, cur,
- next, cond_waiter)) {
- if (cur == my_entry) {
- hasnt_been_signalled = 1;
- break;
- }
- }
- if (hasnt_been_signalled) {
- queue_Remove(cur);
- } else {
- rc = 0;
- if (ResetEvent(my_entry->event)) {
- if (pthread_mutex_lock(mutex)) {
- rc = -5;
- }
- } else {
- rc = -6;
- }
- }
- LeaveCriticalSection(&cond->cs);
- break;
- case WAIT_ABANDONED:
- rc = -2;
- break;
- case WAIT_OBJECT_0:
- if (pthread_mutex_lock(mutex)) {
- rc = -3;
- }
- break;
- default:
- rc = -4;
- break;
- }
+ case WAIT_FAILED:
+ rc = -1;
+ break;
+ case WAIT_TIMEOUT:
+ rc = ETIMEDOUT;
+ /*
+ * This is a royal pain. We've timed out waiting
+ * for the signal, but between the time out and here
+ * it is possible that we were actually signalled by
+ * another thread. So we grab the condition lock
+ * and scan the waiting thread queue to see if we are
+ * still there. If we are, we just remove ourselves.
+ *
+ * If we are no longer listed in the waiter queue,
+ * it means that we were signalled after the time
+ * out occurred and so we have to do another wait
+ * WHICH HAS TO SUCCEED! In this case, we reset
+ * rc to indicate that we were signalled.
+ *
+ * We have to wait or otherwise, the event
+ * would be cached in the signalled state, which
+ * is wrong. It might be more efficient to just
+ * close and reopen the event.
+ */
+ EnterCriticalSection(&cond->cs);
+ for(queue_Scan(&cond->waiting_threads, cur,
+ next, cond_waiter)) {
+ if (cur == my_entry) {
+ hasnt_been_signalled = 1;
+ break;
+ }
+ }
+ if (hasnt_been_signalled) {
+ queue_Remove(cur);
+ } else {
+ rc = 0;
+ if (!ResetEvent(my_entry->event)) {
+ rc = -6;
+ }
+ }
+ LeaveCriticalSection(&cond->cs);
+ break;
+ case WAIT_ABANDONED:
+ rc = -2;
+ break;
+ case WAIT_OBJECT_0:
+ rc = 0;
+ break;
+ default:
+ rc = -4;
+ break;
+ }
+ if (pthread_mutex_lock(mutex) != 0) {
+ rc = -3;
+ }
} else {
rc = EINVAL;
}
int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime) {
int rc = 0;
struct _timeb now, then;
- short n_milli, t_milli;
+ afs_uint32 n_milli, t_milli;
if (abstime->tv_nsec < 1000000000) {
/*
* pthread timedwait uses an absolute time, NT uses relative so
* we convert here. The millitm field in the timeb struct is
- * unsigned, but we need to do subtraction preserving the sign,
+ * unsigned, but we need to do subtraction preserving the sign,
* so we copy the fields into temporary variables.
*
* WARNING:
then.time = abstime->tv_sec;
t_milli = abstime->tv_nsec/1000000;
- if((then.time > now.time ||
+ if((then.time > now.time ||
(then.time == now.time && t_milli > n_milli))) {
if((t_milli -= n_milli) < 0) {
t_milli += 1000;
* Under NT, we can only wait for milliseconds, so we
* round up the wait time here.
*/
- rc = cond_wait_internal(cond, mutex,
- ((then.time * 1000) + (t_milli)));
+ rc = cond_wait_internal(cond, mutex,
+ (DWORD)((then.time * 1000) + (t_milli)));
} else {
rc = EINVAL;
}
EnterCriticalSection(&cond->cs);
/*
- * Empty the waiting_threads queue.
+ * Empty the waiting_threads queue.
*/
if (queue_IsNotEmpty(&cond->waiting_threads)) {
for(queue_Scan(&cond->waiting_threads, release_thread,
} else {
rc = EINVAL;
}
-
+
/*
* A previous version of this file had code to check the waiter
* queue and empty it here. This has been removed in the hopes
void *rc = NULL;
char **tsd = TlsGetValue(tsd_index);
- if (tsd == NULL)
- return NULL;
+ if (tsd == NULL)
+ return NULL;
if ((key > -1) && (key < PTHREAD_KEYS_MAX )) {
rc = (void *) *(tsd + key);
RaiseException(PTHREAD_EXIT_EXCEPTION, 0, 0, NULL);
}
+
+/*
+ * DllMain() -- Entry-point function called by the DllMainCRTStartup()
+ * function in the MSVC runtime DLL (msvcrt.dll).
+ *
+ * Note: the system serializes calls to this function.
+ *
+ * On attach, thread notifications are disabled (this library manages
+ * its own per-thread state) and the three pthread_once-guarded caches
+ * are initialized eagerly; on detach they are torn down in reverse
+ * dependency order (waiter cache, TSD, thread cache).
+ *
+ * NOTE(review): code run under the loader lock is restricted; the
+ * once-routines here appear to only touch critical sections, TLS and
+ * queues -- confirm none of them creates threads or loads libraries.
+ */
+BOOL WINAPI
+DllMain(HINSTANCE dllInstHandle,/* instance handle for this DLL module */
+ DWORD reason, /* reason function is being called */
+ LPVOID reserved)
+{ /* reserved for future use */
+ switch (reason) {
+ case DLL_PROCESS_ATTACH:
+ /* library is being attached to a process */
+ /* disable thread attach/detach notifications */
+ (void)DisableThreadLibraryCalls(dllInstHandle);
+
+ pthread_once(&pthread_cache_once, create_once);
+ pthread_once(&global_tsd_once, tsd_once);
+ pthread_once(&waiter_cache_once, init_waiter_cache);
+ return TRUE;
+
+ case DLL_PROCESS_DETACH:
+ cleanup_waiter_cache();
+ cleanup_global_tsd();
+ cleanup_pthread_cache();
+ return TRUE;
+
+ default:
+ return FALSE;
+ }
+}
+