/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

#include <afsconfig.h>
#include "afs/param.h"

#include "afs/sysincludes.h"    /* Standard vendor system headers */
#include "afsincludes.h"        /* Afs-based standard headers */
#include "afs/afs_stats.h"      /* afs statistics */

#if defined(FREEZER_H_EXISTS)
#include <linux/freezer.h>
#endif

static char waitV, dummyV;

void
afs_osi_InitWaitHandle(struct afs_osi_WaitHandle *achandle)
{
    AFS_STATCNT(osi_InitWaitHandle);
    achandle->proc = (caddr_t) 0;
}

void
afs_osi_CancelWait(struct afs_osi_WaitHandle *achandle)
{
    caddr_t proc;

    AFS_STATCNT(osi_CancelWait);
    proc = achandle->proc;
    if (proc == 0)
        return;
    achandle->proc = (caddr_t) 0;   /* so the waiter can tell it was signalled */
    afs_osi_Wakeup(&waitV);
}

/* afs_osi_Wait
 * Waits for data on ahandle, or ams ms later.  ahandle may be null.
 * Returns 0 if timeout and EINTR if signalled.
 */
int
afs_osi_Wait(afs_int32 ams, struct afs_osi_WaitHandle *ahandle, int aintok)
{
    int code;
    afs_int32 endTime;

    AFS_STATCNT(osi_Wait);
    endTime = osi_Time() + (ams / 1000);
    if (ahandle)
        ahandle->proc = (caddr_t) current;

    do {
        AFS_ASSERT_GLOCK();
        code = afs_osi_TimedSleep(&waitV, ams, 1);
        if (code)
            break;
        if (ahandle && (ahandle->proc == (caddr_t) 0)) {
            /* we've been signalled */
            break;
        }
    } while (osi_Time() < endTime);
    return code;
}

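/*
 * Usage sketch (illustrative only, kept out of the build with #if 0): a
 * caller waits up to five seconds for data, using a wait handle so that
 * another thread can cut the wait short with afs_osi_CancelWait().  The
 * function and its locals are hypothetical.
 */
#if 0
static void
example_wait_for_data(void)
{
    struct afs_osi_WaitHandle handle;
    int code;

    afs_osi_InitWaitHandle(&handle);
    code = afs_osi_Wait(5000, &handle, 0);
    if (code == EINTR)
        return;         /* interrupted by a signal; caller unwinds */
    /* code == 0: either the timeout expired or the wait was cancelled */
}
#endif
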
typedef struct afs_event {
    struct afs_event *next;     /* next in hash chain */
    char *event;                /* lwp event: an address */
    int refcount;               /* Is it in use? */
    int seq;                    /* Sequence number: this is incremented
                                 * by wakeup calls; wait will not return until
                                 * it changes */
    wait_queue_head_t cond;
} afs_event_t;

#define HASHSIZE 128
afs_event_t *afs_evhasht[HASHSIZE];     /* Hash table for events */
#define afs_evhash(event)       (afs_uint32) ((((long)event)>>2) & (HASHSIZE-1));
int afs_evhashcnt = 0;

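/*
 * Sketch of the handshake these declarations support (illustrative only):
 * a sleeper samples evp->seq before blocking and loops until a wakeup has
 * incremented it, so a wakeup that lands between the caller's check of its
 * own condition and the sleep is not lost.
 *
 *      sleeper:  seq = evp->seq; while (seq == evp->seq) { block on cond; }
 *      waker:    evp->seq++; wake_up(&evp->cond);
 */
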
/* Get and initialize event structure corresponding to lwp event (i.e. address) */
static afs_event_t *
afs_getevent(char *event)
{
    afs_event_t *evp, *newp = 0;
    int hashcode;

    hashcode = afs_evhash(event);
    evp = afs_evhasht[hashcode];
    while (evp) {
        if (evp->event == event) {
            evp->refcount++;
            return evp;
        }
        if (evp->refcount == 0)
            newp = evp;         /* remember a free entry we can reuse */
        evp = evp->next;
    }
    if (!newp)
        return NULL;            /* caller must afs_addevent() and retry */
    newp->event = event;
    newp->refcount = 1;
    return newp;
}

/* afs_addevent -- allocates a new event for the address.  It isn't returned;
 *     instead, afs_getevent should be called again.  Thus, the real effect of
 *     this routine is to add another event to the hash bucket for this
 *     address.
 *
 * Locks:
 *     Called with GLOCK held. However the function might drop
 *     GLOCK when it calls osi_AllocSmallSpace for allocating
 *     a new event (In Linux, the allocator drops GLOCK to avoid
 *     a deadlock).
 */

static void
afs_addevent(char *event)
{
    int hashcode;
    afs_event_t *newp;

    hashcode = afs_evhash(event);
    newp = osi_linux_alloc(sizeof(afs_event_t), 0);
    afs_evhashcnt++;
    newp->next = afs_evhasht[hashcode];
    afs_evhasht[hashcode] = newp;
    init_waitqueue_head(&newp->cond);
    newp->seq = 0;
    newp->event = &dummyV;      /* Dummy address for new events */
    newp->refcount = 0;
}

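/*
 * Lookup pattern used by the sleep routines below (sketch, not compiled):
 * if the event is not yet hashed, add it and look it up again, since
 * afs_addevent() deliberately does not return the new entry.
 */
#if 0
    evp = afs_getevent(event);
    if (!evp) {
        afs_addevent(event);    /* may drop and retake GLOCK while allocating */
        evp = afs_getevent(event);
    }
#endif
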
#ifndef set_current_state
#define set_current_state(x)    current->state = (x);
#endif

/* Release the specified event */
#define relevent(evp) ((evp)->refcount--)

/* afs_osi_SleepSig
 *
 * Waits for an event to be notified, returning early if a signal
 * is received.  Returns EINTR if signaled, and 0 otherwise.
 */
int
afs_osi_SleepSig(void *event)
{
    struct afs_event *evp;
    int seq, retval;
#ifdef DECLARE_WAITQUEUE
    DECLARE_WAITQUEUE(wait, current);
#else
    struct wait_queue wait = { current, NULL };
#endif

    evp = afs_getevent(event);
    if (!evp) {
        afs_addevent(event);
        evp = afs_getevent(event);
    }

    seq = evp->seq;
    retval = 0;

    add_wait_queue(&evp->cond, &wait);
    while (seq == evp->seq) {
        set_current_state(TASK_INTERRUPTIBLE);
        AFS_ASSERT_GLOCK();
        AFS_GUNLOCK();
        schedule();
#ifdef CONFIG_PM
        if (
#ifdef PF_FREEZE
            current->flags & PF_FREEZE
#else
#if defined(STRUCT_TASK_STRUCT_HAS_TODO)
            !current->todo
#else
#if defined(STRUCT_TASK_STRUCT_HAS_THREAD_INFO)
            test_ti_thread_flag(current->thread_info, TIF_FREEZE)
#else
            test_ti_thread_flag(task_thread_info(current), TIF_FREEZE)
#endif
#endif
#endif
            )
#ifdef LINUX_REFRIGERATOR_TAKES_PF_FREEZE
            refrigerator(PF_FREEZE);
#else
            refrigerator();
#endif
#endif
        AFS_GLOCK();
        if (signal_pending(current)) {
            retval = EINTR;
            break;
        }
    }
    remove_wait_queue(&evp->cond, &wait);
    set_current_state(TASK_RUNNING);

    relevent(evp);
    return retval;
}

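/*
 * Usage sketch (illustrative only, not compiled): wait for a condition that
 * GLOCK protects, giving up if the process catches a signal.  The flag and
 * error path are hypothetical.
 */
#if 0
    while (!some_flag) {
        if (afs_osi_SleepSig(&some_flag) == EINTR)
            return EINTR;       /* let the caller unwind before signal delivery */
    }
#endif
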
/* afs_osi_Sleep -- waits for an event to be notified, ignoring signals.
 * - NOTE: on Linux, there are circumstances in which TASK_INTERRUPTIBLE
 *   can wake up, even if all signals are blocked
 * - TODO: handle signals correctly by passing an indication back to the
 *   caller that the wait has been interrupted and the stack should be cleaned
 *   up preparatory to signal delivery
 */
void
afs_osi_Sleep(void *event)
{
    sigset_t saved_set;

    SIG_LOCK(current);
    saved_set = current->blocked;
    sigfillset(&current->blocked);
    RECALC_SIGPENDING(current);
    SIG_UNLOCK(current);

    afs_osi_SleepSig(event);

    SIG_LOCK(current);
    current->blocked = saved_set;
    RECALC_SIGPENDING(current);
    SIG_UNLOCK(current);
}

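/*
 * Usage sketch (illustrative only, not compiled): callers normally sleep on
 * the address of the datum they are waiting for, and the producer wakes that
 * same address after updating it, both while holding GLOCK.  The req
 * structure is hypothetical.
 */
#if 0
    /* consumer */
    while (!req->done)
        afs_osi_Sleep(&req->done);

    /* producer */
    req->done = 1;
    afs_osi_Wakeup(&req->done);
#endif
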
/* afs_osi_TimedSleep
 *
 * Arguments:
 * event - event to sleep on
 * ams --- max sleep time in milliseconds
 * aintok - 1 if should sleep interruptibly
 *
 * Returns 0 if timeout, EINTR if signalled, and EAGAIN if it might
 * have raced.
 */
static int
afs_osi_TimedSleep(void *event, afs_int32 ams, int aintok)
{
    int code = 0;
    long ticks = (ams * HZ / 1000) + 1;
    struct afs_event *evp;
#ifdef DECLARE_WAITQUEUE
    DECLARE_WAITQUEUE(wait, current);
#else
    struct wait_queue wait = { current, NULL };
#endif

    evp = afs_getevent(event);
    if (!evp) {
        afs_addevent(event);
        evp = afs_getevent(event);
    }

    add_wait_queue(&evp->cond, &wait);
    set_current_state(TASK_INTERRUPTIBLE);
    /* always sleep TASK_INTERRUPTIBLE to keep load average
     * from artificially increasing. */
    AFS_GUNLOCK();

    if (aintok) {
        if (schedule_timeout(ticks))
            code = EINTR;
    } else
        schedule_timeout(ticks);
#ifdef CONFIG_PM
    if (
#ifdef PF_FREEZE
        current->flags & PF_FREEZE
#else
#if defined(STRUCT_TASK_STRUCT_HAS_TODO)
        !current->todo
#else
#if defined(STRUCT_TASK_STRUCT_HAS_THREAD_INFO)
        test_ti_thread_flag(current->thread_info, TIF_FREEZE)
#else
        test_ti_thread_flag(task_thread_info(current), TIF_FREEZE)
#endif
#endif
#endif
        )
#ifdef LINUX_REFRIGERATOR_TAKES_PF_FREEZE
        refrigerator(PF_FREEZE);
#else
        refrigerator();
#endif
#endif

    AFS_GLOCK();
    remove_wait_queue(&evp->cond, &wait);
    set_current_state(TASK_RUNNING);

    relevent(evp);
    return code;
}

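/*
 * Worked example of the tick conversion above (illustrative): with HZ == 100,
 * a 250 ms request becomes (250 * 100 / 1000) + 1 == 26 jiffies; the +1
 * rounds the sleep up so a very short request never becomes a zero-tick
 * sleep.  afs_osi_Wait() above is the canonical caller, looping on this
 * routine until data arrives, the wait is cancelled, or its deadline passes.
 */
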
int
afs_osi_Wakeup(void *event)
{
    int ret = 2;
    struct afs_event *evp;

    evp = afs_getevent(event);
    if (!evp)                   /* No sleepers */
        return 1;

    if (evp->refcount > 1) {