2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 * This file contains a skeleton pthread implementation for NT.
12 * This is not intended to be a fully compliant pthread implementation
13 * The purpose of this file is to only implement those functions that
14 * are truly needed to support the afs code base.
16 * A secondary goal is to allow a "real" pthread implementation to
17 * replace this file without any modification to code that depends upon
20 * The function signatures and argument types are meant to be the same
21 * as their UNIX prototypes.
22 * Where possible, the POSIX specified return values are used.
23 * For situations where an error can occur, but no corresponding
24 * POSIX error value exists, unique (within a given function) negative
25 * numbers are used for errors to avoid collisions with the errno
29 #include <afs/param.h>
38 #include <sys/timeb.h>
40 #define PTHREAD_EXIT_EXCEPTION 0x1
/*
 * NOTE(review): this file appears to be a truncated extract -- many of the
 * original lines are missing and every surviving line carries a stray
 * leading line number.  Comments below describe only what the visible
 * code demonstrates; anything else is hedged.
 */
43 * Posix threads uses static initialization for pthread_once control
44 * objects, and under NT, every sophisticated synchronization primitive
45 * uses procedural initialization. This forces the use of CompareExchange
46 * (aka test and set) and busy waiting for threads that compete to run
47 * a pthread_once'd function. We make these "busy" threads give up their
48 * timeslice - which should cause acceptable behavior on a single processor
49 * machine, but on a multiprocessor machine this could very well result
/*
 * pthread_once() -- run init_routine at most once per once_control.
 * Visible mechanism: InterlockedExchange on call_started acts as an
 * atomic test-and-set; the losing threads spin on call_running,
 * yielding with Sleep(20) because SwitchToThread() does not exist on
 * Win95.  The winner clears call_running when init_routine completes.
 * Return-value conventions are not visible in this extract.
 */
53 int pthread_once(pthread_once_t *once_control, void (*init_routine)(void)) {
56 if ((once_control != NULL) && (init_routine != NULL)) {
57 if (InterlockedExchange((LPLONG)&once_control->call_started,
/* winner path: init_routine has finished; release the spinners */
60 once_control->call_running = 0;
62 /* use Sleep() since SwitchToThread() not available on Win95 */
63 while(once_control->call_running) Sleep(20);
72 * For now only support PTHREAD_PROCESS_PRIVATE mutexes.
73 * if PTHREAD_PROCESS_SHARED are required later they can be added
/*
 * pthread_mutex_init() -- initialize *mp as a Win32 CRITICAL_SECTION.
 * Only the default attribute is supported: attr must be NULL or the
 * call is rejected (error path not visible in this extract).
 */
76 int pthread_mutex_init(pthread_mutex_t *mp, const pthread_mutexattr_t *attr) {
79 if ((mp != NULL) && (attr == NULL)) {
80 InitializeCriticalSection(&mp->cs);
90 * Under NT, critical sections can be locked recursively by the owning
91 * thread. This is opposite of the pthread spec, and so we keep track
92 * of the thread that has locked a critical section. If the same thread
93 * tries to lock a critical section more than once we fail.
/*
 * pthread_mutex_trylock() -- non-blocking lock attempt.
 * Two compile-time variants (the #ifdef lines are missing from this
 * extract, but the trailing #endif for AFS_WIN95_ENV is visible):
 *   - Win95: TryEnterCriticalSection() is unavailable, so the call
 *     degrades to a blocking pthread_mutex_lock() -- see the long
 *     caveat comment below.
 *   - Other platforms: use TryEnterCriticalSection(); because
 *     CRITICAL_SECTIONs are recursive, a successful re-entry by the
 *     owning thread is detected (via mp->tid, presumably -- the check
 *     itself is truncated) and undone with LeaveCriticalSection so the
 *     call fails as POSIX requires.
 */
95 int pthread_mutex_trylock(pthread_mutex_t *mp) {
99 /* TryEnterCriticalSection() not available on Win95, so just wait for
100 * the lock. Correct code generally can't depend on how long the
101 * function takes to return, so the only code that will be broken is
102 * that for which 1) the mutex *mp is obtained and never released or
103 * 2) the mutex *mp is intentionally held until trylock() returns.
104 * These cases are unusual and don't appear in normal (non-test) AFS
105 * code; furthermore, we can reduce (but not eliminate!) the problem by
106 * sneaking a look at isLocked even though we don't hold the
107 * CRITICAL_SECTION in mutex *mp and are thus vulnerable to race
108 * conditions. Note that self-deadlock isn't a problem since
109 * CRITICAL_SECTION objects are recursive.
111 * Given the very restricted usage of the pthread library on Windows 95,
112 * we can live with these limitations.
118 rc = pthread_mutex_lock(mp);
124 /* TryEnterCriticalSection() provided on other MS platforms of interest */
126 if (TryEnterCriticalSection(&mp->cs)) {
128 /* same thread tried to recursively lock, fail */
129 LeaveCriticalSection(&mp->cs);
/* record the new owner on success */
133 mp->tid = GetCurrentThreadId();
142 #endif /* AFS_WIN95_ENV */
/*
 * pthread_mutex_lock() -- blocking lock.  Enters the critical section,
 * then records the owning thread id in mp->tid so recursive locking by
 * the same thread can be detected (the comparison itself is truncated
 * from this extract; on detection the section is released and EDEADLK
 * is reported, per the comment below).
 */
148 int pthread_mutex_lock(pthread_mutex_t *mp) {
152 EnterCriticalSection(&mp->cs);
155 mp->tid = GetCurrentThreadId();
158 * same thread tried to recursively lock this mutex.
159 * Under real POSIX, this would cause a deadlock, but NT only
160 * supports recursive mutexes so we indicate the situation
161 * by returning EDEADLK.
163 LeaveCriticalSection(&mp->cs);
/*
 * pthread_mutex_unlock() -- release *mp, but only if the calling thread
 * is the recorded owner (mp->tid); the non-owner error path is not
 * visible in this extract.
 */
179 int pthread_mutex_unlock(pthread_mutex_t *mp) {
183 if (mp->tid == GetCurrentThreadId()) {
186 LeaveCriticalSection(&mp->cs);
/*
 * pthread_mutex_destroy() -- tear down the underlying CRITICAL_SECTION.
 * NOTE(review): no visible check that the mutex is unlocked first.
 */
202 int pthread_mutex_destroy(pthread_mutex_t *mp) {
206 DeleteCriticalSection(&mp->cs);
/*
 * pthread_rwlock_destroy() -- destroy the three primitives that make up
 * this rwlock emulation: both mutexes and the reader-completion condvar.
 */
217 int pthread_rwlock_destroy(pthread_rwlock_t *rwp)
222 pthread_mutex_destroy(&rwp->read_access_completion_mutex);
223 pthread_mutex_destroy(&rwp->write_access_mutex);
224 pthread_cond_destroy(&rwp->read_access_completion_wait);
/*
 * pthread_rwlock_init() -- build the rwlock from a write-access mutex,
 * a reader-count mutex, and a condvar signalled when readers drain.
 * On a later init failure the earlier primitives are unwound (the
 * error-label structure is partly truncated from this extract).
 */
235 int pthread_rwlock_init(pthread_rwlock_t *rwp, const pthread_rwlockattr_t *attr)
244 rc = pthread_mutex_init(&rwp->write_access_mutex, NULL);
248 rc = pthread_mutex_init(&rwp->read_access_completion_mutex, NULL);
252 rc = pthread_cond_init(&rwp->read_access_completion_wait, NULL);
254 return 0; /* success */
/* unwind path: destroy primitives created before the failure */
256 pthread_mutex_destroy(&rwp->read_access_completion_mutex);
259 pthread_mutex_destroy(&rwp->write_access_mutex);
/*
 * pthread_rwlock_wrlock() -- acquire the write lock: take the
 * write_access_mutex (excluding new readers and writers), then wait on
 * the condvar until the active reader count drains to zero.  The
 * write_access_mutex is held across the wait and (on success) retained
 * by the caller; on error both locks are released.
 */
264 int pthread_rwlock_wrlock(pthread_rwlock_t *rwp)
271 if ((rc = pthread_mutex_lock(&rwp->write_access_mutex)) != 0)
274 if ((rc = pthread_mutex_lock(&rwp->read_access_completion_mutex)) != 0)
276 pthread_mutex_unlock(&rwp->write_access_mutex);
280 while (rc == 0 && rwp->readers > 0) {
281 rc = pthread_cond_wait( &rwp->read_access_completion_wait,
282 &rwp->read_access_completion_mutex);
285 pthread_mutex_unlock(&rwp->read_access_completion_mutex);
/* error path: give up the write mutex as well */
288 pthread_mutex_unlock(&rwp->write_access_mutex);
/*
 * pthread_rwlock_rdlock() -- acquire a read lock: pass through the
 * write_access_mutex (blocking while a writer holds it), bump the
 * reader count under read_access_completion_mutex (increment truncated
 * from this extract), then release both mutexes.
 */
293 int pthread_rwlock_rdlock(pthread_rwlock_t *rwp)
300 if ((rc = pthread_mutex_lock(&rwp->write_access_mutex)) != 0)
303 if ((rc = pthread_mutex_lock(&rwp->read_access_completion_mutex)) != 0)
305 pthread_mutex_unlock(&rwp->write_access_mutex);
311 pthread_mutex_unlock(&rwp->read_access_completion_mutex);
313 pthread_mutex_unlock(&rwp->write_access_mutex);
/*
 * pthread_rwlock_tryrdlock() -- non-blocking read lock: same shape as
 * rdlock but using trylock on both internal mutexes; failure of either
 * aborts the attempt (reader-count increment truncated from this
 * extract).
 */
319 int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwp)
326 if ((rc = pthread_mutex_trylock(&rwp->write_access_mutex)) != 0)
329 if ((rc = pthread_mutex_trylock(&rwp->read_access_completion_mutex)) != 0) {
330 pthread_mutex_unlock(&rwp->write_access_mutex);
336 pthread_mutex_unlock(&rwp->read_access_completion_mutex);
338 pthread_mutex_unlock(&rwp->write_access_mutex);
/*
 * pthread_rwlock_trywrlock() -- non-blocking write lock: trylock both
 * internal mutexes, then fail (EBUSY, presumably -- value truncated)
 * if any readers are still active; on success the caller keeps the
 * write_access_mutex.
 */
343 int pthread_rwlock_trywrlock(pthread_rwlock_t *rwp)
350 if ((rc = pthread_mutex_trylock(&rwp->write_access_mutex)) != 0)
353 if ((rc = pthread_mutex_trylock(&rwp->read_access_completion_mutex)) != 0)
355 pthread_mutex_unlock(&rwp->write_access_mutex);
359 if (rwp->readers > 0)
362 pthread_mutex_unlock(&rwp->read_access_completion_mutex);
/* failure path: release the write mutex too */
365 pthread_mutex_unlock(&rwp->write_access_mutex);
/*
 * pthread_rwlock_unlock() -- release either kind of lock.  It probes
 * with trylock on the write_access_mutex: if that *succeeds* the caller
 * did not hold the write lock, so this is a read-unlock (decrement the
 * reader count; when it hits zero, broadcast to any waiting writer);
 * otherwise it is a write-unlock and the write mutex is simply
 * released.  readers <= 0 on a read-unlock indicates caller error
 * (handling truncated from this extract).
 */
370 int pthread_rwlock_unlock(pthread_rwlock_t *rwp)
377 rc = pthread_mutex_trylock(&rwp->write_access_mutex);
380 /* unlock a read lock */
382 pthread_mutex_unlock(&rwp->write_access_mutex);
384 if ((rc = pthread_mutex_lock(&rwp->read_access_completion_mutex)) != 0)
386 pthread_mutex_unlock(&rwp->write_access_mutex);
390 if (rwp->readers <= 0)
396 if (--rwp->readers == 0)
397 pthread_cond_broadcast(&rwp->read_access_completion_wait);
400 pthread_mutex_unlock(&rwp->read_access_completion_mutex);
404 /* unlock a write lock */
405 rc = pthread_mutex_unlock(&rwp->write_access_mutex);
413 * keys is used to keep track of which keys are currently
414 * in use by the threads library. pthread_tsd_mutex is used
417 * The bookkeeping for keys in use and destructor function/key is
418 * at the library level. Each individual thread only keeps its
419 * per key data value. This implies that the keys array and the
420 * tsd array in the pthread_t structure need to always be exactly
421 * the same size since the same index is used for both arrays.
/* per-key bookkeeping: destructor (and, per the text above, an in-use
 * flag whose declaration is truncated from this extract) */
426 void (*destructor)(void *);
427 } pthread_tsd_table_t;
429 static pthread_tsd_table_t keys[PTHREAD_KEYS_MAX];
430 static pthread_mutex_t pthread_tsd_mutex;
431 static pthread_once_t pthread_tsd_once = PTHREAD_ONCE_INIT;
434 * In order to support p_self() and p_join() under NT,
435 * we have to keep our own list of active threads and provide a mapping
436 * function that maps the NT thread id to our internal structure.
437 * The main reason that this is necessary is that GetCurrentThread
438 * returns a special constant not an actual handle to the thread.
439 * This makes it impossible to write a p_self() function that works
440 * with only the native NT functions.
/* active_Q: live threads; cache_Q: recycled thread_t's awaiting reuse */
443 static struct rx_queue active_Q;
444 static struct rx_queue cache_Q;
446 static pthread_mutex_t active_Q_mutex;
447 static pthread_mutex_t cache_Q_mutex;
449 static pthread_once_t pthread_cache_once = PTHREAD_ONCE_INIT;
450 static int pthread_cache_done;
/* per-thread bookkeeping record; queue link must come first so the
 * rx_queue macros can treat it as a queue element.  (Most fields are
 * truncated from this extract.) */
452 typedef struct thread {
453 struct rx_queue thread_queue;
456 pthread_cond_t wait_terminate;
464 } thread_t, *thread_p;
/*
 * create_once() -- pthread_once initializer for the thread-cache
 * machinery: sets up both queues and their mutexes, then marks the
 * subsystem ready via pthread_cache_done.
 */
466 static void create_once(void) {
467 queue_Init(&active_Q);
468 queue_Init(&cache_Q);
469 pthread_mutex_init(&active_Q_mutex, (const pthread_mutexattr_t*)0);
470 pthread_mutex_init(&cache_Q_mutex, (const pthread_mutexattr_t*)0);
471 pthread_cache_done = 1;
/*
 * cleanup_pthread_cache() -- inverse of create_once(): walk both queues
 * (freeing entries, presumably -- loop bodies truncated from this
 * extract), destroy the queue mutexes, and mark the subsystem down.
 */
474 static void cleanup_pthread_cache(void) {
475 thread_p cur = NULL, next = NULL;
477 if (pthread_cache_done) {
478 for(queue_Scan(&active_Q, cur, next, thread)) {
481 for(queue_Scan(&cache_Q, cur, next, thread)) {
485 pthread_mutex_destroy(&active_Q_mutex);
486 pthread_mutex_destroy(&cache_Q_mutex);
488 pthread_cache_done = 0;
/*
 * put_thread() -- recycle a finished thread_t: close its Win32 handle
 * and prepend it to the cache_Q for reuse by get_thread().
 */
492 static void put_thread(thread_p old) {
494 CloseHandle(old->t_handle);
495 pthread_mutex_lock(&cache_Q_mutex);
496 queue_Prepend(&cache_Q, old);
497 pthread_mutex_unlock(&cache_Q_mutex);
/*
 * get_thread() -- obtain a thread_t, either recycled from cache_Q or
 * freshly malloc'd.  A fresh entry gets its one-time primitives
 * (e.g. the wait_terminate condvar) initialized; every handed-out
 * entry gets its per-use fields reset.  malloc-failure handling is
 * truncated from this extract.
 */
500 static thread_p get_thread() {
503 pthread_mutex_lock(&cache_Q_mutex);
505 if (queue_IsEmpty(&cache_Q)) {
506 new = (thread_p) malloc(sizeof(thread_t));
509 * One time initialization - we assume threads put back have
510 * unlocked mutexes and condition variables with no waiters
512 * These functions cannot fail currently.
514 pthread_cond_init(&new->wait_terminate,(const pthread_condattr_t *)0);
517 new = queue_First(&cache_Q, thread);
521 pthread_mutex_unlock(&cache_Q_mutex);
524 * Initialization done every time we hand out a thread_t
530 new->waiter_count = 0;
531 new->has_been_joined = 0;
538 * The thread start function signature is different on NT than the pthread
539 * spec so we create a tiny stub to map from one signature to the next.
540 * This assumes that a void * can be stored within a DWORD.
/* argument package handed to afs_pthread_create_stub: the user's start
 * routine plus this thread's tsd slot array (other members truncated) */
544 void *(*func)(void *);
546 char *tsd[PTHREAD_KEYS_MAX];
/* TLS slot indices; 0xffffffff doubles as the "not yet allocated"
 * sentinel (it equals TLS_OUT_OF_INDEXES) */
550 static DWORD tsd_index = 0xffffffff;
551 static DWORD tsd_pthread_index = 0xffffffff;
552 static pthread_once_t global_tsd_once = PTHREAD_ONCE_INIT;
/*
 * tsd_once() -- pthread_once initializer that allocates the two TLS
 * slots (thread-specific-data array and pthread_t back-pointer),
 * retrying until TlsAlloc stops returning the failure sentinel.
 */
555 static void tsd_once(void) {
556 while(tsd_index == 0xffffffff) {
557 tsd_index = TlsAlloc();
559 while(tsd_pthread_index == 0xffffffff) {
560 tsd_pthread_index = TlsAlloc();
/*
 * tsd_free_all() -- run the registered destructor for every non-NULL
 * thread-specific datum of a terminating thread.  Because a destructor
 * may itself create new thread-specific data, the whole table is
 * re-scanned until a full pass runs no destructors.  (Clearing of the
 * slot before the destructor call is truncated from this extract.)
 */
565 static void tsd_free_all(char *tsd[PTHREAD_KEYS_MAX]) {
566 int call_more_destructors = 0;
570 void (*destructor)(void *);
571 call_more_destructors = 0;
572 for(i=0;i<PTHREAD_KEYS_MAX;i++) {
573 if (tsd[i] != NULL) {
574 destructor = keys[i].destructor;
575 value = (void *)tsd[i];
577 if (destructor != NULL) {
580 * A side-effect of calling a destructor function is that
581 * more thread-specific data may be created for this thread.
582 * If we call a destructor, we must recycle through the
583 * entire list again and run any new destructors.
585 call_more_destructors = 1;
589 } while(call_more_destructors);
/*
 * cleanup_global_tsd() -- process-shutdown cleanup: run tsd destructors
 * for every thread still on the active_Q, then free both TLS slots and
 * reset their indices to the unallocated sentinel.
 */
592 static void cleanup_global_tsd(void)
594 thread_p cur = NULL, next = NULL;
597 for(queue_Scan(&active_Q, cur, next, thread)) {
598 tsd_free_all(cur->tsd);
601 TlsFree(tsd_pthread_index);
602 tsd_pthread_index = 0xFFFFFFFF;
604 tsd_index = 0xFFFFFFFF;
/*
 * afs_pthread_create_stub() -- Win32-signature trampoline that every
 * pthread_create'd thread actually starts in.  It wires up the
 * thread's TLS slots, runs the user's start routine under an SEH
 * __try so that pthread_exit's raised exception is caught, runs tsd
 * destructors, and finally wakes any joiners.  Freeing of the
 * pthread_create_t package and the return value are truncated from
 * this extract.
 */
609 static DWORD WINAPI afs_pthread_create_stub(LPVOID param) {
610 pthread_create_t *t = (pthread_create_t *) param;
614 * Initialize thread specific storage structures.
617 memset(t->tsd, 0, (sizeof(char *) * PTHREAD_KEYS_MAX));
/* cheap fast-path: skip pthread_once once tsd_done is set */
618 (tsd_done || pthread_once(&global_tsd_once, tsd_once));
619 TlsSetValue(tsd_index, (LPVOID) (t->tsd));
620 TlsSetValue(tsd_pthread_index, (LPVOID) (t->me));
623 * Call the function the user passed to pthread_create and catch the
624 * pthread exit exception if it is raised.
628 rc = (*(t->func))(t->arg);
629 } __except(GetExceptionCode() == PTHREAD_EXIT_EXCEPTION) {
630 rc = t->me->rc; /* rc is set at pthread_exit */
634 * Cycle through the thread specific data for this thread and
635 * call the destructor function for each non-NULL datum
638 tsd_free_all (t->tsd);
642 * If we are joinable, signal any waiters.
645 pthread_mutex_lock(&active_Q_mutex);
646 if (t->me->is_joinable) {
649 if (t->me->waiter_count) {
650 pthread_cond_broadcast(&t->me->wait_terminate);
656 pthread_mutex_unlock(&active_Q_mutex);
663 * If a pthread function is called on a thread which was not created by
664 * pthread_create(), that thread will have an entry added to the active_Q
665 * by pthread_self(). When the thread terminates, we need to know
666 * about it, so that we can perform cleanup. A dedicated thread is therefore
667 * maintained, which watches for any thread marked "native_thread==1"
668 * in the active_Q to terminate. The thread spends most of its time sleeping:
669 * it can be signalled by a dedicated event in order to alert it to the
670 * presence of a new thread to watch, or will wake up automatically when
671 * a native thread terminates.
/* state for the singleton watcher thread described above; the wakeup
 * list holds the wakeup event at index 0 followed by one handle per
 * watched native thread */
674 static DWORD terminate_thread_id = 0;
675 static HANDLE terminate_thread_handle = INVALID_HANDLE_VALUE;
676 static HANDLE terminate_thread_wakeup_event = INVALID_HANDLE_VALUE;
677 static HANDLE *terminate_thread_wakeup_list = NULL;
678 static size_t terminate_thread_wakeup_list_size = 0;
/*
 * terminate_thread_routine() -- body of the watcher thread.  Loop:
 * count native threads on the active_Q, (re)size the wakeup-handle
 * list, then block in WaitForMultipleObjects on [wakeup_event,
 * native thread handles...].  Waking on a thread handle means that
 * native thread died: run its tsd destructors and recycle its entry
 * (removal/put_thread calls are truncated from this extract).  Waking
 * on the event (index 0) just means "rescan".  The only visible exit
 * path is malloc failure of the wakeup list.
 */
680 static DWORD WINAPI terminate_thread_routine(LPVOID param) {
682 DWORD native_thread_count;
683 int should_terminate;
684 int terminate_thread_wakeup_list_index;
688 * Grab the active_Q_mutex, and while we hold it, scan the active_Q
689 * to see how many native threads we need to watch. If we don't need
690 * to watch any, we can stop this watcher thread entirely (or not);
691 * if we do need to watch some, fill the terminate_thread_wakeup_list
692 * array and go to sleep.
696 native_thread_count = 0;
697 should_terminate = FALSE;
698 pthread_mutex_lock(&active_Q_mutex);
700 for(queue_Scan(&active_Q, cur, next, thread)) {
701 if (cur->native_thread)
702 ++native_thread_count;
706 * At this point we could decide to terminate this watcher thread
707 * whenever there are no longer any native threads to watch--however,
708 * since thread creation is a time-consuming thing, and since this
709 * thread spends all its time sleeping anyway, there's no real
710 * compelling reason to do so. Thus, the following statement is
713 * if (!native_thread_count) {
714 * should_terminate = TRUE;
717 * Restore the snippet above to cause this watcher thread to only
718 * live whenever there are native threads to watch.
723 * Make sure that our wakeup_list array is large enough to contain
724 * the handles of all the native threads /and/ to contain an
725 * entry for our wakeup_event (in case another native thread comes
728 if (terminate_thread_wakeup_list_size < (1+native_thread_count)) {
729 if (terminate_thread_wakeup_list)
730 free (terminate_thread_wakeup_list);
731 terminate_thread_wakeup_list = (HANDLE*)malloc (sizeof(HANDLE) *
732 (1+native_thread_count));
733 if (terminate_thread_wakeup_list == NULL) {
/* allocation failed: give up and shut the watcher down */
734 should_terminate = TRUE;
736 terminate_thread_wakeup_list_size = 1+native_thread_count;
740 if (should_terminate) {
742 * Here, we've decided to terminate this watcher thread.
743 * Free our wakeup event and wakeup list, then release the
744 * active_Q_mutex and break this loop.
746 if (terminate_thread_wakeup_list)
747 free (terminate_thread_wakeup_list);
748 CloseHandle (terminate_thread_wakeup_event);
749 terminate_thread_id = 0;
750 terminate_thread_handle = INVALID_HANDLE_VALUE;
751 terminate_thread_wakeup_event = INVALID_HANDLE_VALUE;
752 terminate_thread_wakeup_list = NULL;
753 terminate_thread_wakeup_list_size = 0;
754 pthread_mutex_unlock(&active_Q_mutex);
758 * Here, we've decided to wait for native threads et al.
759 * Fill out the wakeup_list.
761 memset(terminate_thread_wakeup_list, 0x00, (sizeof(HANDLE) *
762 (1+native_thread_count)));
764 terminate_thread_wakeup_list[0] = terminate_thread_wakeup_event;
765 terminate_thread_wakeup_list_index = 1;
769 for(queue_Scan(&active_Q, cur, next, thread)) {
770 if (cur->native_thread) {
771 terminate_thread_wakeup_list[terminate_thread_wakeup_list_index]
773 ++terminate_thread_wakeup_list_index;
/* clear any stale signal before sleeping, while still holding
 * active_Q_mutex so no new-thread notification can be lost */
777 ResetEvent (terminate_thread_wakeup_event);
780 pthread_mutex_unlock(&active_Q_mutex);
783 * Time to sleep. We'll wake up if either of the following happen:
784 * 1) Someone sets the terminate_thread_wakeup_event (this will
785 * happen if another native thread gets added to the active_Q)
786 * 2) One or more of the native threads terminate
788 terminate_thread_wakeup_list_index = WaitForMultipleObjects(
789 1+native_thread_count,
790 terminate_thread_wakeup_list,
795 * If we awoke from sleep because an event other than
796 * terminate_thread_wakeup_event was triggered, it means the
797 * specified thread has terminated. (If more than one thread
798 * terminated, we'll handle this first one and loop around--
799 * the event's handle will still be triggered, so we just won't
800 * block at all when we sleep next time around.)
802 if (terminate_thread_wakeup_list_index > 0) {
803 pthread_mutex_lock(&active_Q_mutex);
807 for(queue_Scan(&active_Q, cur, next, thread)) {
808 if (cur->t_handle == terminate_thread_wakeup_list[ terminate_thread_wakeup_list_index ])
814 * Cycle through the thread specific data for the specified
815 * thread and call the destructor function for each non-NULL
816 * datum. Then remove the thread_t from active_Q and put it
817 * back on cache_Q for possible later re-use.
819 if(cur->tsd != NULL) {
820 tsd_free_all(cur->tsd);
828 pthread_mutex_unlock(&active_Q_mutex);
/*
 * pthread_sync_terminate_thread() -- lazily start the watcher thread
 * (creating its uniquely-named wakeup event first), then signal the
 * event so the watcher rescans the active_Q for the newly added
 * native thread.  Caller is expected to hold active_Q_mutex
 * (presumably -- callers are truncated from this extract).
 */
835 static void pthread_sync_terminate_thread(void) {
836 (pthread_cache_done || pthread_once(&pthread_cache_once, create_once));
838 if (terminate_thread_handle == INVALID_HANDLE_VALUE) {
839 CHAR eventName[MAX_PATH];
/* implicit int: pre-C99 style, kept as-is */
840 static eventCount = 0;
841 sprintf(eventName, "terminate_thread_wakeup_event %d::%d", _getpid(), eventCount++);
842 terminate_thread_wakeup_event = CreateEvent((LPSECURITY_ATTRIBUTES) 0,
843 TRUE, FALSE, (LPCTSTR) eventName);
844 terminate_thread_handle = CreateThread((LPSECURITY_ATTRIBUTES) 0, 0,
845 terminate_thread_routine, (LPVOID) 0, 0,
846 &terminate_thread_id);
848 SetEvent (terminate_thread_wakeup_event);
854 * Only support the detached attribute specifier for pthread_create.
855 * Under NT, thread stacks grow automatically as needed.
/*
 * pthread_create() -- allocate a pthread_create_t package and a
 * thread_t (from the cache), record joinability from attr (defaulting
 * to PTHREAD_CREATE_JOINABLE when attr is NULL), enqueue the entry on
 * active_Q *before* CreateThread (see the comment below), then start
 * the thread through afs_pthread_create_stub.  On CreateThread
 * failure the package is freed and (presumably) the queue entry
 * removed -- that cleanup is truncated from this extract.
 */
858 int pthread_create(pthread_t *tid, const pthread_attr_t *attr, void *(*func)(void *), void *arg) {
860 pthread_create_t *t = NULL;
862 (pthread_cache_done || pthread_once(&pthread_cache_once, create_once));
864 if ((tid != NULL) && (func != NULL)) {
865 if ((t = (pthread_create_t *) malloc(sizeof(pthread_create_t))) &&
866 (t->me = get_thread()) ) {
869 *tid = (pthread_t) t->me;
871 t->me->is_joinable = attr->is_joinable;
873 t->me->is_joinable = PTHREAD_CREATE_JOINABLE;
875 t->me->native_thread = 0;
878 * At the point (before we actually create the thread)
879 * we need to add our entry to the active queue. This ensures
880 * us that other threads who may run after this thread returns
881 * will find an entry for the created thread regardless of
882 * whether the newly created thread has run or not.
883 * In the event the thread create fails, we will have temporarily
884 * added an entry to the list that was never valid, but we
885 * (i.e. the thread that is calling thread_create) are the
886 * only one who could possibly know about the bogus entry
887 * since we hold the active_Q_mutex.
889 pthread_mutex_lock(&active_Q_mutex);
890 queue_Prepend(&active_Q, t->me);
891 t->me->t_handle = CreateThread((LPSECURITY_ATTRIBUTES) 0, 0,
892 afs_pthread_create_stub, (LPVOID) t, 0,
894 if (t->me->t_handle == 0) {
896 * we only free t if the thread wasn't created, otherwise
897 * it's free'd by the new thread.
904 pthread_mutex_unlock(&active_Q_mutex);
/*
 * pthread_cond_init() -- initialize a condvar as a critical section
 * plus a FIFO queue of per-waiter events; only the default (NULL)
 * attribute is supported.
 */
917 int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr) {
921 * Only support default attribute -> must pass a NULL pointer for
924 if ((attr == NULL) && (cond != NULL)) {
925 InitializeCriticalSection(&cond->cs);
926 queue_Init(&cond->waiting_threads);
935 * In order to optimize the performance of condition variables,
936 * we maintain a pool of cond_waiter_t's that have been dynamically
937 * allocated. There is no attempt made to garbage collect these -
938 * once they have been created, they stay in the cache for the life
/* waiter-event pool and the critical section guarding it */
942 static struct rx_queue waiter_cache;
943 static CRITICAL_SECTION waiter_cache_cs;
944 static int waiter_cache_init;
945 static pthread_once_t waiter_cache_once = PTHREAD_ONCE_INIT;
/*
 * init_waiter_cache() -- pthread_once initializer for the waiter pool.
 */
947 static void init_waiter_cache(void) {
948 InitializeCriticalSection(&waiter_cache_cs);
949 queue_Init(&waiter_cache);
950 waiter_cache_init = 1;
/*
 * cleanup_waiter_cache() -- process-shutdown teardown of the waiter
 * pool: close each cached event (freeing of the entries themselves is
 * truncated from this extract), destroy the guard, and mark the pool
 * uninitialized.
 */
953 static void cleanup_waiter_cache(void)
955 cond_waiters_t * cur = NULL, * next = NULL;
957 if (waiter_cache_init) {
958 for(queue_Scan(&waiter_cache, cur, next, cond_waiter)) {
961 CloseHandle(cur->event);
965 DeleteCriticalSection(&waiter_cache_cs);
966 waiter_cache_init = 0;
/*
 * get_waiter() -- obtain a cond_waiters_t from the pool, or malloc a
 * new one with a fresh auto-reset, initially-unsignalled named event.
 * Returns NULL if allocation or event creation fails (cleanup on the
 * CreateEvent-failure path is truncated from this extract).
 */
970 static cond_waiters_t *get_waiter() {
971 cond_waiters_t *new = NULL;
973 (waiter_cache_init || pthread_once(&waiter_cache_once, init_waiter_cache));
975 EnterCriticalSection(&waiter_cache_cs);
977 if (queue_IsEmpty(&waiter_cache)) {
978 new = (cond_waiters_t *) malloc(sizeof(cond_waiters_t));
980 CHAR eventName[MAX_PATH];
981 static eventCount = 0;
982 sprintf(eventName, "cond_waiters_t %d::%d", _getpid(), eventCount++);
/* bManualReset=FALSE: event auto-resets after releasing one waiter */
983 new->event = CreateEvent((LPSECURITY_ATTRIBUTES) 0, FALSE,
984 FALSE, (LPCTSTR) eventName);
985 if (new->event == NULL) {
991 new = queue_First(&waiter_cache, cond_waiter);
995 LeaveCriticalSection(&waiter_cache_cs);
/*
 * put_waiter() -- return a waiter entry (and its event) to the pool.
 */
1000 static void put_waiter(cond_waiters_t *old) {
1002 (waiter_cache_init || pthread_once(&waiter_cache_once, init_waiter_cache));
1004 EnterCriticalSection(&waiter_cache_cs);
1005 queue_Prepend(&waiter_cache, old);
1006 LeaveCriticalSection(&waiter_cache_cs);
/*
 * cond_wait_internal() -- common core of pthread_cond_wait and
 * pthread_cond_timedwait.  Protocol: append this thread's waiter
 * event to cond's queue, release the user mutex, block on the event
 * for `time` ms, then reacquire the mutex.  The timeout path must
 * handle the race where a signaller removed us from the queue after
 * WaitForSingleObject timed out -- see the long comment below.
 * Errors from mutex re-lock are fatal-ish (handling truncated).
 */
1009 static int cond_wait_internal(pthread_cond_t *cond, pthread_mutex_t *mutex, const DWORD time) {
1011 cond_waiters_t *my_entry = get_waiter();
1012 cond_waiters_t *cur, *next;
1013 int hasnt_been_signalled=0;
1015 if ((cond != NULL) && (mutex != NULL) && (my_entry != NULL)) {
1016 EnterCriticalSection(&cond->cs);
1017 queue_Append(&cond->waiting_threads, my_entry);
1018 LeaveCriticalSection(&cond->cs);
1020 if (pthread_mutex_unlock(mutex) == 0) {
1021 switch(WaitForSingleObject(my_entry->event, time)) {
1028 * This is a royal pain. We've timed out waiting
1029 * for the signal, but between the time out and here
1030 * it is possible that we were actually signalled by
1031 * another thread. So we grab the condition lock
1032 * and scan the waiting thread queue to see if we are
1033 * still there. If we are, we just remove ourselves.
1035 * If we are no longer listed in the waiter queue,
1036 * it means that we were signalled after the time
1037 * out occurred and so we have to do another wait
1038 * WHICH HAS TO SUCCEED! In this case, we reset
1039 * rc to indicate that we were signalled.
1041 * We have to wait or otherwise, the event
1042 * would be cached in the signalled state, which
1043 * is wrong. It might be more efficient to just
1044 * close and reopen the event.
1046 EnterCriticalSection(&cond->cs);
1047 for(queue_Scan(&cond->waiting_threads, cur,
1048 next, cond_waiter)) {
1049 if (cur == my_entry) {
1050 hasnt_been_signalled = 1;
1054 if (hasnt_been_signalled) {
/* genuinely timed out: remove ourselves (dequeue truncated) */
1058 if (!ResetEvent(my_entry->event)) {
1062 LeaveCriticalSection(&cond->cs);
1064 case WAIT_ABANDONED:
/* re-take the user's mutex before returning, per POSIX contract */
1074 if (pthread_mutex_lock(mutex) != 0) {
1084 if (my_entry != NULL) {
1085 put_waiter(my_entry);
/*
 * pthread_cond_wait() -- untimed wait; delegates to the common core
 * with an INFINITE timeout.
 */
1091 int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) {
1094 rc = cond_wait_internal(cond, mutex, INFINITE);
/*
 * pthread_cond_timedwait() -- wait with a POSIX *absolute* deadline.
 * Converts abstime minus "now" (via _ftime into a struct _timeb) to a
 * relative millisecond count for the Win32 wait.  millitm is unsigned,
 * so the fields are copied into signed-capable temporaries first.
 * A deadline already in the past presumably yields ETIMEDOUT (that
 * branch is truncated from this extract).
 */
1098 int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime) {
1100 struct _timeb now, then;
1101 afs_uint32 n_milli, t_milli;
/* reject malformed timespec (tv_nsec must be < 1e9) */
1103 if (abstime->tv_nsec < 1000000000) {
1106 * pthread timedwait uses an absolute time, NT uses relative so
1107 * we convert here. The millitm field in the timeb struct is
1108 * unsigned, but we need to do subtraction preserving the sign,
1109 * so we copy the fields into temporary variables.
1112 * In NT 4.0 SP3, WaitForSingleObject can occasionally timeout
1113 * earlier than requested. Therefore, our pthread_cond_timedwait
1114 * can also return early.
1118 n_milli = now.millitm;
1119 then.time = abstime->tv_sec;
1120 t_milli = abstime->tv_nsec/1000000;
1122 if((then.time > now.time ||
1123 (then.time == now.time && t_milli > n_milli))) {
/* borrow a second when the millisecond subtraction underflows */
1124 if((t_milli -= n_milli) < 0) {
1128 then.time -= now.time;
/* sanity cap on the relative wait to avoid DWORD overflow */
1130 if ((then.time + (clock() / CLOCKS_PER_SEC)) <= 50000000) {
1132 * Under NT, we can only wait for milliseconds, so we
1133 * round up the wait time here.
1135 rc = cond_wait_internal(cond, mutex,
1136 (DWORD)((then.time * 1000) + (t_milli)));
/*
 * pthread_cond_signal() -- wake exactly one waiter: dequeue the head
 * of the FIFO waiting_threads queue and set its event.  SetEvent
 * failure is detected (handling truncated from this extract).
 */
1150 int pthread_cond_signal(pthread_cond_t *cond) {
1152 cond_waiters_t *release_thread;
1155 EnterCriticalSection(&cond->cs);
1158 * remove the first waiting thread from the queue
1159 * and resume his execution
1161 if (queue_IsNotEmpty(&cond->waiting_threads)) {
1162 release_thread = queue_First(&cond->waiting_threads,
1164 queue_Remove(release_thread);
1165 if (!SetEvent(release_thread->event)) {
1170 LeaveCriticalSection(&cond->cs);
/*
 * pthread_cond_broadcast() -- wake every waiter: drain the
 * waiting_threads queue, setting each waiter's event in turn.
 */
1178 int pthread_cond_broadcast(pthread_cond_t *cond) {
1180 cond_waiters_t *release_thread, *next_thread;
1183 EnterCriticalSection(&cond->cs);
1186 * Empty the waiting_threads queue.
1188 if (queue_IsNotEmpty(&cond->waiting_threads)) {
1189 for(queue_Scan(&cond->waiting_threads, release_thread,
1190 next_thread, cond_waiter)) {
1191 queue_Remove(release_thread);
1192 if (!SetEvent(release_thread->event)) {
1198 LeaveCriticalSection(&cond->cs);
/*
 * pthread_cond_destroy() -- delete the condvar's critical section.
 * Destroying a condvar with live waiters is deliberately left
 * unchecked (see the historical note below).
 */
1206 int pthread_cond_destroy(pthread_cond_t *cond) {
1210 DeleteCriticalSection(&cond->cs);
1216 * A previous version of this file had code to check the waiter
1217 * queue and empty it here. This has been removed in the hopes
1218 * that it will aid in debugging.
/*
 * pthread_join() -- wait for target_thread to terminate and collect
 * its exit status.  Under active_Q_mutex: verify the target is on the
 * active_Q, is joinable, and has not already been joined; then wait
 * on its wait_terminate condvar while it is still running.  Only one
 * waiter wins the join; the last waiter out recycles the thread_t
 * back to the cache (put_thread call truncated from this extract).
 * Self-join detection is implied by the me/target comparison, which
 * is truncated here.
 */
1224 int pthread_join(pthread_t target_thread, void **status) {
1226 thread_p me, target;
1229 target = (thread_p) target_thread;
1230 me = (thread_p) pthread_self();
1234 * Check to see that the target thread is joinable and hasn't
1235 * already been joined.
1238 pthread_mutex_lock(&active_Q_mutex);
1240 for(queue_Scan(&active_Q, cur, next, thread)) {
1241 if (target == cur) break;
1244 if (target == cur) {
1245 if ((!target->is_joinable) || (target->has_been_joined)) {
1253 pthread_mutex_unlock(&active_Q_mutex);
1257 target->waiter_count++;
1258 while(target->running) {
1259 pthread_cond_wait(&target->wait_terminate, &active_Q_mutex);
1263 * Only one waiter gets the status and is allowed to join, all the
1264 * others get an error.
1267 if (target->has_been_joined) {
1270 target->has_been_joined = 1;
1272 *status = target->rc;
1277 * If we're the last waiter it is our responsibility to remove
1278 * this entry from the terminated list and put it back in the
1282 target->waiter_count--;
1283 if (target->waiter_count == 0) {
1284 queue_Remove(target);
1285 pthread_mutex_unlock(&active_Q_mutex);
1288 pthread_mutex_unlock(&active_Q_mutex);
1298 * Note that we can't return an error from pthread_getspecific so
1299 * we return a NULL pointer instead.
/*
 * pthread_getspecific() -- fetch this thread's datum for `key` from
 * the per-thread tsd array stored in TLS.  Returns NULL for an
 * out-of-range key (and, per the note above, for any error).
 */
1302 void *pthread_getspecific(pthread_key_t key) {
1304 char **tsd = TlsGetValue(tsd_index);
1309 if ((key > -1) && (key < PTHREAD_KEYS_MAX )) {
1310 rc = (void *) *(tsd + key);
/* set once pthread_tsd_init has run; lets callers skip pthread_once */
1316 static int p_tsd_done;
/*
 * pthread_tsd_init() -- pthread_once initializer for the key-table
 * mutex.
 */
1318 static void pthread_tsd_init(void) {
1319 pthread_mutex_init(&pthread_tsd_mutex, (const pthread_mutexattr_t*)0);
/*
 * pthread_key_create() -- allocate the first free slot in the global
 * keys[] table under pthread_tsd_mutex, recording the destructor.
 * Table-full (EAGAIN, presumably) and the write of *keyp are
 * truncated from this extract.
 */
1323 int pthread_key_create(pthread_key_t *keyp, void (*destructor)(void *value)) {
1327 if (p_tsd_done || (!pthread_once(&pthread_tsd_once, pthread_tsd_init))) {
1328 if (!pthread_mutex_lock(&pthread_tsd_mutex)) {
1329 for(i=0;i<PTHREAD_KEYS_MAX;i++) {
1330 if (!keys[i].inuse) break;
1333 if (!keys[i].inuse) {
1335 keys[i].destructor = destructor;
1340 pthread_mutex_unlock(&pthread_tsd_mutex);
/*
 * pthread_key_delete() -- release a key slot: clear its inuse flag and
 * destructor under pthread_tsd_mutex.  Per-thread values for the key
 * are not touched (POSIX does not require it).
 */
1351 int pthread_key_delete(pthread_key_t key) {
1354 if (p_tsd_done || (!pthread_once(&pthread_tsd_once, pthread_tsd_init))) {
1355 if ((key > -1) && (key < PTHREAD_KEYS_MAX )) {
1356 if (!pthread_mutex_lock(&pthread_tsd_mutex)) {
1357 keys[key].inuse = 0;
1358 keys[key].destructor = NULL;
1359 pthread_mutex_unlock(&pthread_tsd_mutex);
/*
 * pthread_setspecific() -- store `value` in this thread's tsd slot for
 * `key`, after validating the key range and that the key is in use
 * (checked under pthread_tsd_mutex).
 */
1373 int pthread_setspecific(pthread_key_t key, const void *value) {
1377 /* make sure all thread-local storage has been allocated */
1380 if (p_tsd_done || (!pthread_once(&pthread_tsd_once, pthread_tsd_init))) {
1381 if ((key > -1) && (key < PTHREAD_KEYS_MAX )) {
1382 if (!pthread_mutex_lock(&pthread_tsd_mutex)) {
1383 if (keys[key].inuse) {
1384 tsd = TlsGetValue(tsd_index);
1385 *(tsd + key) = (char *) value;
1389 pthread_mutex_unlock(&pthread_tsd_mutex);
/*
 * pthread_self() -- return this thread's thread_t.  For
 * pthread_create'd threads it comes straight from TLS
 * (tsd_pthread_index).  For native (non-pthread) threads a new
 * thread_t is fabricated on the fly: a real duplicated handle is
 * stored (GetCurrentThread only yields a pseudo-handle), a tsd array
 * is allocated, the entry joins the active_Q, and the terminate
 * watcher is nudged so the entry gets cleaned up when the native
 * thread dies.
 */
1403 pthread_t pthread_self(void) {
1405 DWORD my_id = GetCurrentThreadId();
1407 (pthread_cache_done || pthread_once(&pthread_cache_once, create_once));
1408 (tsd_done || pthread_once(&global_tsd_once, tsd_once));
1410 pthread_mutex_lock(&active_Q_mutex);
1412 cur = TlsGetValue (tsd_pthread_index);
1416 * This thread's ID was not found in our list of pthread-API client
1417 * threads (e.g., those threads created via pthread_create). Create
1420 if ((cur = get_thread()) != NULL) {
1421 cur->is_joinable = 0;
1423 cur->native_thread = 1;
/* turn the pseudo-handle into a real one we can wait on later */
1424 DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
1425 GetCurrentProcess(), &cur->t_handle, 0,
1426 TRUE, DUPLICATE_SAME_ACCESS);
1429 * We'll also need a place to store key data for this thread
1431 if ((cur->tsd = malloc(sizeof(char*) * PTHREAD_KEYS_MAX)) != NULL) {
1432 memset(cur->tsd, 0, (sizeof(char*) * PTHREAD_KEYS_MAX));
1434 TlsSetValue(tsd_index, (LPVOID)cur->tsd);
1435 TlsSetValue(tsd_pthread_index, (LPVOID)cur);
1438 * The thread_t structure is complete; add it to the active_Q
1440 queue_Prepend(&active_Q, cur);
1443 * We were able to successfully insert a new entry into the
1444 * active_Q; however, when this thread terminates, we will need
1445 * to know about it. The pthread_sync_terminate_thread() routine
1446 * will make sure there is a dedicated thread waiting for any
1447 * native-thread entries in the active_Q to terminate.
1449 pthread_sync_terminate_thread();
1453 pthread_mutex_unlock(&active_Q_mutex);
1455 return (void *) cur;
/*
 * pthread_equal() -- compare two thread handles (body truncated from
 * this extract; pthread_t here is a thread_p pointer, so a pointer
 * comparison presumably suffices).
 */
1458 int pthread_equal(pthread_t t1, pthread_t t2) {
/*
 * pthread_attr_destroy() -- attrs hold no resources here, so this is
 * effectively a no-op (body truncated from this extract).
 */
1462 int pthread_attr_destroy(pthread_attr_t *attr) {
/*
 * pthread_attr_init() -- set the attribute object to its defaults;
 * the only supported attribute is joinability, defaulting to joinable.
 */
1468 int pthread_attr_init(pthread_attr_t *attr) {
1472 attr->is_joinable = PTHREAD_CREATE_JOINABLE;
/*
 * pthread_attr_getdetachstate() -- report the stored detach state
 * (is_joinable holds the PTHREAD_CREATE_* value directly).
 */
1480 int pthread_attr_getdetachstate(pthread_attr_t *attr, int *detachstate) {
1483 if ((attr != NULL) && (detachstate != NULL)) {
1484 *detachstate = attr->is_joinable;
/*
 * pthread_attr_setdetachstate() -- store the detach state after
 * validating it is one of the two legal PTHREAD_CREATE_* values.
 */
1491 int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate) {
1494 if ((attr != NULL) && ((detachstate == PTHREAD_CREATE_JOINABLE) ||
1495 (detachstate == PTHREAD_CREATE_DETACHED))) {
1496 attr->is_joinable = detachstate;
/*
 * pthread_exit() -- terminate the calling thread by raising
 * PTHREAD_EXIT_EXCEPTION, which unwinds back to the SEH handler in
 * afs_pthread_create_stub.  The exit status is stashed in me->rc
 * first (that store is truncated from this extract).  For a native
 * thread the exception is unhandled and kills the process, per the
 * comment below.
 */
1503 void pthread_exit(void *status) {
1504 thread_p me = (thread_p) pthread_self();
1507 * Support pthread_exit for thread's created by calling pthread_create
1508 * only. Do this by using an exception that will transfer control
1509 * back to afs_pthread_create_stub. Store away our status before
1512 * If this turns out to be a native thread, the exception will be
1513 * unhandled and the process will terminate.
1517 RaiseException(PTHREAD_EXIT_EXCEPTION, 0, 0, NULL);
1522 * DllMain() -- Entry-point function called by the DllMainCRTStartup()
1523 * function in the MSVC runtime DLL (msvcrt.dll).
1525 * Note: the system serializes calls to this function.
1528 DllMain(HINSTANCE dllInstHandle,/* instance handle for this DLL module */
1529 DWORD reason, /* reason function is being called */
1531 { /* reserved for future use */
1533 case DLL_PROCESS_ATTACH:
1534 /* library is being attached to a process */
1535 /* disable thread attach/detach notifications */
1536 (void)DisableThreadLibraryCalls(dllInstHandle);
1538 pthread_once(&pthread_cache_once, create_once);
1539 pthread_once(&global_tsd_once, tsd_once);
1540 pthread_once(&waiter_cache_once, init_waiter_cache);
1543 case DLL_PROCESS_DETACH:
1544 cleanup_waiter_cache();
1545 cleanup_global_tsd();
1546 cleanup_pthread_cache();