2 * Copyright (C) 1999, 1998 Transarc Corporation. All rights reserved.
5 * This file contains a skeleton pthread implementation for NT.
6 * This is not intended to be a fully compliant pthread implementation
7 * The purpose of this file is to only implement those functions that
8 * are truly needed to support the afs code base.
10 * A secondary goal is to allow a "real" pthread implementation to
11 * replace this file without any modification to code that depends upon
14 * The function signatures and argument types are meant to be the same
15 * as their UNIX prototypes.
16 * Where possible, the POSIX specified return values are used.
17 * For situations where an error can occur, but no corresponding
18 * POSIX error value exists, unique (within a given function) negative
19 * numbers are used for errors to avoid collisions with the errno
23 #include <afs/param.h>
31 #include <sys/timeb.h>
33 #define PTHREAD_EXIT_EXCEPTION 0x1
36 * Posix threads uses static initialization for pthread_once control
37 * objects, and under NT, every sophisticated synchronization primitive
38 * uses procedural initialization. This forces the use of CompareExchange
39 * (aka test and set) and busy waiting for threads that compete to run
40 * a pthread_once'd function. We make these "busy" threads give up their
41 * timeslice - which should cause acceptable behavior on a single processor
42 * machine, but on a multiprocessor machine this could very well result
46 int pthread_once(pthread_once_t *once_control, void (*init_routine)(void)) {
49 if ((once_control != NULL) && (init_routine != NULL)) {
50 if (InterlockedExchange((LPLONG)&once_control->call_started,
53 once_control->call_running = 0;
55 /* use Sleep() since SwitchToThread() not available on Win95 */
56 while(once_control->call_running) Sleep(20);
65 * For now only support PTHREAD_PROCESS_PRIVATE mutexes.
66 * if PTHREAD_PROCESS_SHARED are required later they can be added
69 int pthread_mutex_init(pthread_mutex_t *mp, const pthread_mutexattr_t *attr) {
72 if ((mp != NULL) && (attr == NULL)) {
73 InitializeCriticalSection(&mp->cs);
83 * Under NT, critical sections can be locked recursively by the owning
84 * thread. This is opposite of the pthread spec, and so we keep track
85 * of the thread that has locked a critical section. If the same thread
86 * tries to lock a critical section more than once we fail.
88 int pthread_mutex_trylock(pthread_mutex_t *mp) {
92 /* TryEnterCriticalSection() not available on Win95, so just wait for
93 * the lock. Correct code generally can't depend on how long the
94 * function takes to return, so the only code that will be broken is
95 * that for which 1) the mutex *mp is obtained and never released or
96 * 2) the mutex *mp is intentionally held until trylock() returns.
97 * These cases are unusual and don't appear in normal (non-test) AFS
98 * code; furthermore, we can reduce (but not eliminate!) the problem by
99 * sneaking a look at isLocked even though we don't hold the
100 * CRITICAL_SECTION in mutex *mp and are thus vulnerable to race
101 * conditions. Note that self-deadlock isn't a problem since
102 * CRITICAL_SECTION objects are recursive.
104 * Given the very restricted usage of the pthread library on Windows 95,
105 * we can live with these limitations.
111 rc = pthread_mutex_lock(mp);
117 /* TryEnterCriticalSection() provided on other MS platforms of interest */
119 if (TryEnterCriticalSection(&mp->cs)) {
121 /* same thread tried to recursively lock, fail */
122 LeaveCriticalSection(&mp->cs);
126 mp->tid = GetCurrentThreadId();
135 #endif /* AFS_WIN95_ENV */
141 int pthread_mutex_lock(pthread_mutex_t *mp) {
145 EnterCriticalSection(&mp->cs);
148 mp->tid = GetCurrentThreadId();
151 * same thread tried to recursively lock this mutex.
152 * Under real POSIX, this would cause a deadlock, but NT only
153 * supports recursive mutexes so we indicate the situation
154 * by returning EDEADLK.
156 LeaveCriticalSection(&mp->cs);
166 int pthread_mutex_unlock(pthread_mutex_t *mp) {
170 if (mp->tid == GetCurrentThreadId()) {
173 LeaveCriticalSection(&mp->cs);
183 int pthread_mutex_destroy(pthread_mutex_t *mp) {
187 DeleteCriticalSection(&mp->cs);
196 * keys is used to keep track of which keys are currently
197 * in use by the threads library. pthread_tsd_mutex is used
200 * The bookkeeping for keys in use and destructor function/key is
201 * at the library level. Each individual thread only keeps its
202 * per key data value. This implies that the keys array and the
203 * tsd array in the pthread_t structure need to always be exactly
204 * the same size since the same index is used for both arrays.
209 void (*destructor)(void *);
210 } pthread_tsd_table_t;
212 static pthread_tsd_table_t keys[PTHREAD_KEYS_MAX];
213 static pthread_mutex_t pthread_tsd_mutex;
214 static pthread_once_t pthread_tsd_once = PTHREAD_ONCE_INIT;
217 * In order to support p_self() and p_join() under NT,
218 * we have to keep our own list of active threads and provide a mapping
219 * function that maps the NT thread id to our internal structure.
220 * The main reason that this is necessary is that GetCurrentThread
221 * returns a special constant not an actual handle to the thread.
222 * This makes it impossible to write a p_self() function that works
223 * with only the native NT functions.
226 static struct rx_queue active_Q;
227 static struct rx_queue cache_Q;
229 static pthread_mutex_t active_Q_mutex;
230 static pthread_mutex_t cache_Q_mutex;
232 static pthread_once_t pthread_cache_once = PTHREAD_ONCE_INIT;
233 static int pthread_cache_done;
235 typedef struct thread {
236 struct rx_queue thread_queue;
239 pthread_cond_t wait_terminate;
247 } thread_t, *thread_p;
249 static void create_once(void) {
250 queue_Init(&active_Q);
251 queue_Init(&cache_Q);
252 pthread_mutex_init(&active_Q_mutex, (const pthread_mutexattr_t*)0);
253 pthread_mutex_init(&cache_Q_mutex, (const pthread_mutexattr_t*)0);
254 pthread_cache_done = 1;
257 static void put_thread(thread_p old) {
259 CloseHandle(old->t_handle);
260 pthread_mutex_lock(&cache_Q_mutex);
261 queue_Prepend(&cache_Q, old);
262 pthread_mutex_unlock(&cache_Q_mutex);
265 static thread_p get_thread() {
268 pthread_mutex_lock(&cache_Q_mutex);
270 if (queue_IsEmpty(&cache_Q)) {
271 new = (thread_p) malloc(sizeof(thread_t));
274 * One time initialization - we assume threads put back have
275 * unlocked mutexes and condition variables with no waiters
277 * These functions cannot fail currently.
279 pthread_cond_init(&new->wait_terminate,(const pthread_condattr_t *)0);
282 new = queue_First(&cache_Q, thread);
286 pthread_mutex_unlock(&cache_Q_mutex);
289 * Initialization done every time we hand out a thread_t
295 new->waiter_count = 0;
296 new->has_been_joined = 0;
303 * The thread start function signature is different on NT than the pthread
304 * spec so we create a tiny stub to map from one signature to the next.
305 * This assumes that a void * can be stored within a DWORD.
309 void *(*func)(void *);
311 char *tsd[PTHREAD_KEYS_MAX];
315 static DWORD tsd_index = 0xffffffff;
316 static DWORD tsd_pthread_index = 0xffffffff;
317 static pthread_once_t global_tsd_once = PTHREAD_ONCE_INIT;
320 static void tsd_once(void) {
321 while(tsd_index == 0xffffffff) {
322 tsd_index = TlsAlloc();
324 while(tsd_pthread_index == 0xffffffff) {
325 tsd_pthread_index = TlsAlloc();
330 static void tsd_free_all(char *tsd[PTHREAD_KEYS_MAX]) {
331 int call_more_destructors = 0;
335 void (*destructor)(void *);
336 call_more_destructors = 0;
337 for(i=0;i<PTHREAD_KEYS_MAX;i++) {
338 if (tsd[i] != NULL) {
339 destructor = keys[i].destructor;
340 value = (void *)tsd[i];
342 if (destructor != NULL) {
345 * A side-effect of calling a destructor function is that
346 * more thread specific may be created for this thread.
347 * If we call a destructor, we must recycle through the
348 * entire list again and run any new destructors.
350 call_more_destructors = 1;
354 } while(call_more_destructors);
357 static DWORD WINAPI afs_pthread_create_stub(LPVOID param) {
358 pthread_create_t *t = (pthread_create_t *) param;
362 * Initialize thread specific storage structures.
365 memset(t->tsd, 0, (sizeof(char *) * PTHREAD_KEYS_MAX));
366 (tsd_done || pthread_once(&global_tsd_once, tsd_once));
367 TlsSetValue(tsd_index, (LPVOID) (t->tsd));
368 TlsSetValue(tsd_pthread_index, (LPVOID) (t->me));
371 * Call the function the user passed to pthread_create and catch the
372 * pthread exit exception if it is raised.
376 rc = (*(t->func))(t->arg);
377 } __except(GetExceptionCode() == PTHREAD_EXIT_EXCEPTION) {
378 rc = t->me->rc; /* rc is set at pthread_exit */
382 * Cycle through the thread specific data for this thread and
383 * call the destructor function for each non-NULL datum
386 tsd_free_all (t->tsd);
390 * If we are joinable, signal any waiters.
393 pthread_mutex_lock(&active_Q_mutex);
394 if (t->me->is_joinable) {
397 if (t->me->waiter_count) {
398 pthread_cond_broadcast(&t->me->wait_terminate);
404 pthread_mutex_unlock(&active_Q_mutex);
411 * If a pthread function is called on a thread which was not created by
412 * pthread_create(), that thread will have an entry added to the active_Q
413 * by pthread_self(). When the thread terminates, we need to know
414 * about it, so that we can perform cleanup. A dedicated thread is therefore
415 * maintained, which watches for any thread marked "native_thread==1"
416 * in the active_Q to terminate. The thread spends most of its time sleeping:
417 * it can be signalled by a dedicated event in order to alert it to the
418  * presence of a new thread to watch, or will wake up automatically when
419 * a native thread terminates.
422 static DWORD terminate_thread_id = 0;
423 static HANDLE terminate_thread_handle = INVALID_HANDLE_VALUE;
424 static HANDLE terminate_thread_wakeup_event = INVALID_HANDLE_VALUE;
425 static HANDLE *terminate_thread_wakeup_list = NULL;
426 static size_t terminate_thread_wakeup_list_size = 0;
428 static DWORD WINAPI terminate_thread_routine(LPVOID param) {
430 size_t native_thread_count;
431 int should_terminate;
432 int terminate_thread_wakeup_list_index;
436 * Grab the active_Q_mutex, and while we hold it, scan the active_Q
437 * to see how many native threads we need to watch. If we don't need
438 * to watch any, we can stop this watcher thread entirely (or not);
439 * if we do need to watch some, fill the terminate_thread_wakeup_list
440 * array and go to sleep.
444 native_thread_count = 0;
445 should_terminate = FALSE;
446 pthread_mutex_lock(&active_Q_mutex);
448 for(queue_Scan(&active_Q, cur, next, thread)) {
449 if (cur->native_thread)
450 ++native_thread_count;
454 * At this point we could decide to terminate this watcher thread
455 * whenever there are no longer any native threads to watch--however,
456 * since thread creation is a time-consuming thing, and since this
457 * thread spends all its time sleeping anyway, there's no real
458 * compelling reason to do so. Thus, the following statement is
461 * if (!native_thread_count) {
462 * should_terminate = TRUE;
465 * Restore the snippet above to cause this watcher thread to only
466 * live whenever there are native threads to watch.
471 * Make sure that our wakeup_list array is large enough to contain
472 * the handles of all the native threads /and/ to contain an
473 * entry for our wakeup_event (in case another native thread comes
476 if (terminate_thread_wakeup_list_size < (1+native_thread_count)) {
477 if (terminate_thread_wakeup_list)
478 free (terminate_thread_wakeup_list);
479 terminate_thread_wakeup_list = (HANDLE*)malloc (sizeof(HANDLE) *
480 (1+native_thread_count));
481 if (terminate_thread_wakeup_list == NULL) {
482 should_terminate = TRUE;
484 terminate_thread_wakeup_list_size = 1+native_thread_count;
488 if (should_terminate) {
490 * Here, we've decided to terminate this watcher thread.
491 * Free our wakeup event and wakeup list, then release the
492 * active_Q_mutex and break this loop.
494 if (terminate_thread_wakeup_list)
495 free (terminate_thread_wakeup_list);
496 CloseHandle (terminate_thread_wakeup_event);
497 terminate_thread_id = 0;
498 terminate_thread_handle = INVALID_HANDLE_VALUE;
499 terminate_thread_wakeup_event = INVALID_HANDLE_VALUE;
500 terminate_thread_wakeup_list = NULL;
501 terminate_thread_wakeup_list_size = 0;
502 pthread_mutex_unlock(&active_Q_mutex);
506 * Here, we've decided to wait for native threads et al.
507 * Fill out the wakeup_list.
509 memset(terminate_thread_wakeup_list, 0x00, (sizeof(HANDLE) *
510 (1+native_thread_count)));
512 terminate_thread_wakeup_list[0] = terminate_thread_wakeup_event;
513 terminate_thread_wakeup_list_index = 1;
517 for(queue_Scan(&active_Q, cur, next, thread)) {
518 if (cur->native_thread) {
519 terminate_thread_wakeup_list[terminate_thread_wakeup_list_index]
521 ++terminate_thread_wakeup_list_index;
525 ResetEvent (terminate_thread_wakeup_event);
528 pthread_mutex_unlock(&active_Q_mutex);
531 * Time to sleep. We'll wake up if either of the following happen:
532 * 1) Someone sets the terminate_thread_wakeup_event (this will
533 * happen if another native thread gets added to the active_Q)
534 * 2) One or more of the native threads terminate
536 terminate_thread_wakeup_list_index = WaitForMultipleObjects(
537 1+native_thread_count,
538 terminate_thread_wakeup_list,
543 * If we awoke from sleep because an event other than
544 * terminate_thread_wakeup_event was triggered, it means the
545 * specified thread has terminated. (If more than one thread
546 * terminated, we'll handle this first one and loop around--
547 * the event's handle will still be triggered, so we just won't
548 * block at all when we sleep next time around.)
550 if (terminate_thread_wakeup_list_index > 0) {
551 pthread_mutex_lock(&active_Q_mutex);
555 for(queue_Scan(&active_Q, cur, next, thread)) {
556 if (cur->t_handle == terminate_thread_wakeup_list[ terminate_thread_wakeup_list_index ])
562 * Cycle through the thread specific data for the specified
563 * thread and call the destructor function for each non-NULL
564 * datum. Then remove the thread_t from active_Q and put it
565 * back on cache_Q for possible later re-use.
567 if(cur->tsd != NULL) {
568 tsd_free_all(cur->tsd);
576 pthread_mutex_unlock(&active_Q_mutex);
583 static void pthread_sync_terminate_thread(void) {
584 (pthread_cache_done || pthread_once(&pthread_cache_once, create_once));
586 if (terminate_thread_handle == INVALID_HANDLE_VALUE) {
587 terminate_thread_wakeup_event = CreateEvent((LPSECURITY_ATTRIBUTES) 0,
588 TRUE, FALSE, (LPCTSTR) 0);
589 terminate_thread_handle = CreateThread((LPSECURITY_ATTRIBUTES) 0, 0,
590 terminate_thread_routine, (LPVOID) 0, 0,
591 &terminate_thread_id);
593 SetEvent (terminate_thread_wakeup_event);
599 * Only support the detached attribute specifier for pthread_create.
600 * Under NT, thread stacks grow automatically as needed.
603 int pthread_create(pthread_t *tid, const pthread_attr_t *attr, void *(*func)(void *), void *arg) {
605 pthread_create_t *t = NULL;
607 (pthread_cache_done || pthread_once(&pthread_cache_once, create_once));
609 if ((tid != NULL) && (func != NULL)) {
610 if ((t = (pthread_create_t *) malloc(sizeof(pthread_create_t))) &&
611 (t->me = get_thread()) ) {
614 *tid = (pthread_t) t->me;
616 t->me->is_joinable = attr->is_joinable;
618 t->me->is_joinable = PTHREAD_CREATE_JOINABLE;
620 t->me->native_thread = 0;
623 * At the point (before we actually create the thread)
624 * we need to add our entry to the active queue. This ensures
625 * us that other threads who may run after this thread returns
626 * will find an entry for the create thread regardless of
627 * whether the newly created thread has run or not.
628 * In the event the thread create fails, we will have temporarily
629 * added an entry to the list that was never valid, but we
630 * (i.e. the thread that is calling thread_create) are the
631 * only one who could possibly know about the bogus entry
632 * since we hold the active_Q_mutex.
634 pthread_mutex_lock(&active_Q_mutex);
635 queue_Prepend(&active_Q, t->me);
636 t->me->t_handle = CreateThread((LPSECURITY_ATTRIBUTES) 0, 0,
637 afs_pthread_create_stub, (LPVOID) t, 0,
639 if (t->me->t_handle == 0) {
641 * we only free t if the thread wasn't created, otherwise
642 * it's free'd by the new thread.
649 pthread_mutex_unlock(&active_Q_mutex);
662 int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr) {
666 * Only support default attribute -> must pass a NULL pointer for
669 if ((attr == NULL) && (cond != NULL)) {
670 InitializeCriticalSection(&cond->cs);
671 queue_Init(&cond->waiting_threads);
680 * In order to optimize the performance of condition variables,
681 * we maintain a pool of cond_waiter_t's that have been dynamically
682 * allocated. There is no attempt made to garbage collect these -
683 * once they have been created, they stay in the cache for the life
687 static struct rx_queue waiter_cache;
688 static CRITICAL_SECTION waiter_cache_cs;
689 static int waiter_cache_init;
690 static pthread_once_t waiter_cache_once = PTHREAD_ONCE_INIT;
692 static void init_waiter_cache(void) {
693 InitializeCriticalSection(&waiter_cache_cs);
694 waiter_cache_init = 1;
695 queue_Init(&waiter_cache);
698 static cond_waiters_t *get_waiter() {
699 cond_waiters_t *new = NULL;
701 (waiter_cache_init || pthread_once(&waiter_cache_once, init_waiter_cache));
703 EnterCriticalSection(&waiter_cache_cs);
705 if (queue_IsEmpty(&waiter_cache)) {
706 new = (cond_waiters_t *) malloc(sizeof(cond_waiters_t));
708 new->event = CreateEvent((LPSECURITY_ATTRIBUTES) 0, FALSE,
710 if (new->event == NULL) {
716 new = queue_First(&waiter_cache, cond_waiter);
720 LeaveCriticalSection(&waiter_cache_cs);
725 static void put_waiter(cond_waiters_t *old) {
727 (waiter_cache_init || pthread_once(&waiter_cache_once, init_waiter_cache));
729 EnterCriticalSection(&waiter_cache_cs);
730 queue_Prepend(&waiter_cache, old);
731 LeaveCriticalSection(&waiter_cache_cs);
734 static int cond_wait_internal(pthread_cond_t *cond, pthread_mutex_t *mutex, const DWORD time) {
736 cond_waiters_t *my_entry = get_waiter();
737 cond_waiters_t *cur, *next;
738 int hasnt_been_signalled=0;
740 if ((cond != NULL) && (mutex != NULL) && (my_entry != NULL)) {
741 EnterCriticalSection(&cond->cs);
742 queue_Append(&cond->waiting_threads, my_entry);
743 LeaveCriticalSection(&cond->cs);
745 if (!pthread_mutex_unlock(mutex)) {
746 switch(WaitForSingleObject(my_entry->event, time)) {
753 * This is a royal pain. We've timed out waiting
754 * for the signal, but between the time out and here
755 * it is possible that we were actually signalled by
756 * another thread. So we grab the condition lock
757 * and scan the waiting thread queue to see if we are
758 * still there. If we are, we just remove ourselves.
760 * If we are no longer listed in the waiter queue,
761 * it means that we were signalled after the time
762 * out occurred and so we have to do another wait
763 * WHICH HAS TO SUCCEED! In this case, we reset
764 * rc to indicate that we were signalled.
766 * We have to wait or otherwise, the event
767 * would be cached in the signalled state, which
768 * is wrong. It might be more efficient to just
769 * close and reopen the event.
771 EnterCriticalSection(&cond->cs);
772 for(queue_Scan(&cond->waiting_threads, cur,
773 next, cond_waiter)) {
774 if (cur == my_entry) {
775 hasnt_been_signalled = 1;
779 if (hasnt_been_signalled) {
783 if (ResetEvent(my_entry->event)) {
784 if (pthread_mutex_lock(mutex)) {
791 LeaveCriticalSection(&cond->cs);
797 if (pthread_mutex_lock(mutex)) {
812 if (my_entry != NULL) {
813 put_waiter(my_entry);
819 int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) {
822 rc = cond_wait_internal(cond, mutex, INFINITE);
826 int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime) {
828 struct _timeb now, then;
829 short n_milli, t_milli;
831 if (abstime->tv_nsec < 1000000000) {
834 * pthread timedwait uses an absolute time, NT uses relative so
835 * we convert here. The millitm field in the timeb struct is
836 * unsigned, but we need to do subtraction preserving the sign,
837 * so we copy the fields into temporary variables.
840 * In NT 4.0 SP3, WaitForSingleObject can occassionally timeout
841 * earlier than requested. Therefore, our pthread_cond_timedwait
842 * can also return early.
846 n_milli = now.millitm;
847 then.time = abstime->tv_sec;
848 t_milli = abstime->tv_nsec/1000000;
850 if((then.time > now.time ||
851 (then.time == now.time && t_milli > n_milli))) {
852 if((t_milli -= n_milli) < 0) {
856 then.time -= now.time;
858 if ((then.time + (clock() / CLOCKS_PER_SEC)) <= 50000000) {
860 * Under NT, we can only wait for milliseconds, so we
861 * round up the wait time here.
863 rc = cond_wait_internal(cond, mutex,
864 ((then.time * 1000) + (t_milli)));
878 int pthread_cond_signal(pthread_cond_t *cond) {
880 cond_waiters_t *release_thread;
883 EnterCriticalSection(&cond->cs);
886 * remove the first waiting thread from the queue
887 * and resume his execution
889 if (queue_IsNotEmpty(&cond->waiting_threads)) {
890 release_thread = queue_First(&cond->waiting_threads,
892 queue_Remove(release_thread);
893 if (!SetEvent(release_thread->event)) {
898 LeaveCriticalSection(&cond->cs);
906 int pthread_cond_broadcast(pthread_cond_t *cond) {
908 cond_waiters_t *release_thread, *next_thread;
911 EnterCriticalSection(&cond->cs);
914 * Empty the waiting_threads queue.
916 if (queue_IsNotEmpty(&cond->waiting_threads)) {
917 for(queue_Scan(&cond->waiting_threads, release_thread,
918 next_thread, cond_waiter)) {
919 queue_Remove(release_thread);
920 if (!SetEvent(release_thread->event)) {
926 LeaveCriticalSection(&cond->cs);
934 int pthread_cond_destroy(pthread_cond_t *cond) {
938 DeleteCriticalSection(&cond->cs);
944 * A previous version of this file had code to check the waiter
945 * queue and empty it here. This has been removed in the hopes
946 * that it will aid in debugging.
952 int pthread_join(pthread_t target_thread, void **status) {
957 target = (thread_p) target_thread;
958 me = (thread_p) pthread_self();
962 * Check to see that the target thread is joinable and hasn't
963 * already been joined.
966 pthread_mutex_lock(&active_Q_mutex);
968 for(queue_Scan(&active_Q, cur, next, thread)) {
969 if (target == cur) break;
973 if ((!target->is_joinable) || (target->has_been_joined)) {
981 pthread_mutex_unlock(&active_Q_mutex);
985 target->waiter_count++;
986 while(target->running) {
987 pthread_cond_wait(&target->wait_terminate, &active_Q_mutex);
991 * Only one waiter gets the status and is allowed to join, all the
992 * others get an error.
995 if (target->has_been_joined) {
998 target->has_been_joined = 1;
1000 *status = target->rc;
1005 * If we're the last waiter it is our responsibility to remove
1006 * this entry from the terminated list and put it back in the
1010 target->waiter_count--;
1011 if (target->waiter_count == 0) {
1012 queue_Remove(target);
1013 pthread_mutex_unlock(&active_Q_mutex);
1016 pthread_mutex_unlock(&active_Q_mutex);
1026 * Note that we can't return an error from pthread_getspecific so
1027 * we return a NULL pointer instead.
1030 void *pthread_getspecific(pthread_key_t key) {
1032 char **tsd = TlsGetValue(tsd_index);
1034 if ((key > -1) && (key < PTHREAD_KEYS_MAX )) {
1035 rc = (void *) *(tsd + key);
1041 static int p_tsd_done;
1043 static void pthread_tsd_init(void) {
1044 pthread_mutex_init(&pthread_tsd_mutex, (const pthread_mutexattr_t*)0);
1048 int pthread_key_create(pthread_key_t *keyp, void (*destructor)(void *value)) {
1052 if (p_tsd_done || (!pthread_once(&pthread_tsd_once, pthread_tsd_init))) {
1053 if (!pthread_mutex_lock(&pthread_tsd_mutex)) {
1054 for(i=0;i<PTHREAD_KEYS_MAX;i++) {
1055 if (!keys[i].inuse) break;
1058 if (!keys[i].inuse) {
1060 keys[i].destructor = destructor;
1065 pthread_mutex_unlock(&pthread_tsd_mutex);
1076 int pthread_key_delete(pthread_key_t key) {
1079 if (p_tsd_done || (!pthread_once(&pthread_tsd_once, pthread_tsd_init))) {
1080 if ((key > -1) && (key < PTHREAD_KEYS_MAX )) {
1081 if (!pthread_mutex_lock(&pthread_tsd_mutex)) {
1082 keys[key].inuse = 0;
1083 keys[key].destructor = NULL;
1084 pthread_mutex_unlock(&pthread_tsd_mutex);
1098 int pthread_setspecific(pthread_key_t key, const void *value) {
1102 if (p_tsd_done || (!pthread_once(&pthread_tsd_once, pthread_tsd_init))) {
1103 if ((key > -1) && (key < PTHREAD_KEYS_MAX )) {
1104 if (!pthread_mutex_lock(&pthread_tsd_mutex)) {
1105 if (keys[key].inuse) {
1106 tsd = TlsGetValue(tsd_index);
1107 *(tsd + key) = (char *) value;
1111 pthread_mutex_unlock(&pthread_tsd_mutex);
1125 pthread_t pthread_self(void) {
1127 DWORD my_id = GetCurrentThreadId();
1129 (pthread_cache_done || pthread_once(&pthread_cache_once, create_once));
1130 (tsd_done || pthread_once(&global_tsd_once, tsd_once));
1132 pthread_mutex_lock(&active_Q_mutex);
1134 cur = TlsGetValue (tsd_pthread_index);
1138 * This thread's ID was not found in our list of pthread-API client
1139 * threads (e.g., those threads created via pthread_create). Create
1142 if ((cur = get_thread()) != NULL) {
1143 cur->is_joinable = 0;
1145 cur->native_thread = 1;
1146 DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
1147 GetCurrentProcess(), &cur->t_handle, 0,
1148 TRUE, DUPLICATE_SAME_ACCESS);
1151 * We'll also need a place to store key data for this thread
1153 if ((cur->tsd = malloc(sizeof(char*) * PTHREAD_KEYS_MAX)) != NULL) {
1154 memset(cur->tsd, 0, (sizeof(char*) * PTHREAD_KEYS_MAX));
1156 TlsSetValue(tsd_index, (LPVOID)cur->tsd);
1157 TlsSetValue(tsd_pthread_index, (LPVOID)cur);
1160 * The thread_t structure is complete; add it to the active_Q
1162 queue_Prepend(&active_Q, cur);
1165 * We were able to successfully insert a new entry into the
1166 * active_Q; however, when this thread terminates, we will need
1167 * to know about it. The pthread_sync_terminate_thread() routine
1168 * will make sure there is a dedicated thread waiting for any
1169 * native-thread entries in the active_Q to terminate.
1171 pthread_sync_terminate_thread();
1175 pthread_mutex_unlock(&active_Q_mutex);
1177 return (void *) cur;
1180 int pthread_equal(pthread_t t1, pthread_t t2) {
1184 int pthread_attr_destroy(pthread_attr_t *attr) {
1190 int pthread_attr_init(pthread_attr_t *attr) {
1194 attr->is_joinable = PTHREAD_CREATE_JOINABLE;
1202 int pthread_attr_getdetachstate(pthread_attr_t *attr, int *detachstate) {
1205 if ((attr != NULL) && (detachstate != NULL)) {
1206 *detachstate = attr->is_joinable;
1213 int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate) {
1216 if ((attr != NULL) && ((detachstate == PTHREAD_CREATE_JOINABLE) ||
1217 (detachstate == PTHREAD_CREATE_DETACHED))) {
1218 attr->is_joinable = detachstate;
1225 void pthread_exit(void *status) {
1226 thread_p me = (thread_p) pthread_self();
1229 * Support pthread_exit for thread's created by calling pthread_create
1230 * only. Do this by using an exception that will transfer control
1231 * back to afs_pthread_create_stub. Store away our status before
1234 * If this turns out to be a native thread, the exception will be
1235 * unhandled and the process will terminate.
1239 RaiseException(PTHREAD_EXIT_EXCEPTION, 0, 0, NULL);