2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "afs/param.h"
15 #include "afs/sysincludes.h" /* Standard vendor system headers */
16 #include "afsincludes.h" /* Afs-based standard headers */
17 #include "afs/afs_stats.h" /* afs statistics */
19 #include <sys/adspace.h> /* for vm_att(), vm_det() */
/* Sentinel object whose address is returned for zero-length allocation
 * requests, so that a NULL return from afs_osi_Alloc() unambiguously
 * means failure (see afs_osi_Alloc / afs_osi_Free below). */
static char memZero; /* address of 0 bytes for kmem_alloc */
/* osi_Init -- do once per kernel installation initialization.
* -- On Solaris this is called from modload initialization.
* -- On AIX called from afs_config.
* -- On HP called from afsc_link.
* -- On SGI called from afs_init. */
/* Lock guarding the osi event/sleep machinery.  NOTE(review): initialization
 * site is not visible in this excerpt — confirm it is set up in osi_Init. */
lock_t afs_event_lock;
/* Body of osi_Init: one-time setup of the AFS global lock (one arm per
 * platform) and of the global AFS credential afs_osi_cred.
 * NOTE(review): the function header, the `once` declaration, and several
 * lines (including the early return after the once-check) are elided in
 * this excerpt — do not treat the text below as complete. */
if (once++ > 0) /* just in case */
#if defined(AFS_HPUX_ENV)
#else /* AFS_HPUX_ENV */
#if defined(AFS_GLOBAL_SUNLOCK)
#if defined(AFS_SGI62_ENV)
mutex_init(&afs_global_lock, MUTEX_DEFAULT, "afs_global_lock");
#elif defined(AFS_OSF_ENV)
usimple_lock_init(&afs_global_lock);
afs_global_owner = (thread_t)0;
#elif defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
lockinit(&afs_global_lock, PLOCK, "afs global lock", 0, 0);
#elif defined(AFS_AIX41_ENV)
/* AIX: the lock must be pinned in memory before it can be used at
 * interrupt level; lock_alloc does the pinning, simple_lock_init arms it. */
lock_alloc((void*)&afs_global_lock, LOCK_ALLOC_PIN, 1, 1);
simple_lock_init((void *)&afs_global_lock);
#elif !defined(AFS_LINUX22_ENV)
/* Linux initialization in osi directory. Should move the others. */
mutex_init(&afs_global_lock, "afs_global_lock", MUTEX_DEFAULT, NULL);
/* afs_rxglobal_lock is initialized in rx_Init. */
#endif /* AFS_GLOBAL_SUNLOCK */
#endif /* AFS_HPUX_ENV */
/* Build the anonymous kernel credential used for cache-manager I/O;
 * crhold pins its reference count so it is never freed. */
if ( !afs_osicred_initialized ) {
memset((char *)&afs_osi_cred, 0, sizeof(struct AFS_UCRED));
crhold(&afs_osi_cred); /* don't let it evaporate */
afs_osicred_initialized = 1;
osi_flid.fl_pid = osi_flid.fl_sysid = 0;
/* osi_Active -- return nonzero if the vcache entry is "active": it has open
 * file handles, or (platform-dependently) is memory-mapped or is a running
 * text (executable) image.  Used to decide whether pages/caches may be
 * discarded.  NOTE(review): braces and the fallback return are elided in
 * this excerpt. */
int osi_Active(register struct vcache *avc)
AFS_STATCNT(osi_Active);
/* NOTE(review): "(AFS_LINUX20_ENV)" below lacks defined(); it only works
 * because the macro, when present, expands to a nonzero value — should be
 * "defined(AFS_LINUX20_ENV)" for consistency with its neighbors. */
#if defined(AFS_SUN_ENV) || defined(AFS_AIX_ENV) || defined(AFS_OSF_ENV) || defined(AFS_SUN5_ENV) || (AFS_LINUX20_ENV) || defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
if ((avc->opens > 0) || (avc->states & CMAPPED)) return 1; /* XXX: Warning, verify this XXX */
#elif defined(AFS_MACH_ENV)
if (avc->opens > 0 || ((avc->v.v_flag & VTEXT) && !inode_uncache_try(avc))) return 1;
#elif defined(AFS_SGI_ENV)
if ((avc->opens > 0) || AFS_VN_MAPPED(AFSTOV(avc)))
if (avc->opens > 0 || (AFSTOV(avc)->v_flag & VTEXT)) return(1);
/* this call, unlike osi_FlushText, is supposed to discard caches that may
contain invalid information if a file is written remotely, but that may
contain valid information that needs to be written back if the file is
being written locally. It doesn't subsume osi_FlushText, since the latter
function may be needed to flush caches that are invalidated by local writes.
avc->pvnLock is already held, avc->lock is guaranteed not to be held (by
/* osi_FlushPages -- discard the VM pages cached for avc unless this client
 * is the writer (flushing then could lose locally dirty data).  Uses a
 * check / upgrade-to-write-lock / re-check pattern: the cheap test is done
 * under the read lock, then repeated under the write lock because the state
 * may change while the lock is dropped between the two. */
void osi_FlushPages(register struct vcache *avc, struct AFS_UCRED *credp)
ObtainReadLock(&avc->lock);
/* If we've already purged this version, or if we're the ones
writing this version, don't flush it (could lose the
data we're writing). */
if ((hcmp((avc->m.DataVersion), (avc->mapDV)) <= 0) ||
((avc->execsOrWriters > 0) && afs_DirtyPages(avc))) {
ReleaseReadLock(&avc->lock);
ReleaseReadLock(&avc->lock);
ObtainWriteLock(&avc->lock,10);
/* Re-check under the write lock: the condition may have changed while
 * no lock was held above. */
if ((hcmp((avc->m.DataVersion), (avc->mapDV)) <= 0) ||
((avc->execsOrWriters > 0) && afs_DirtyPages(avc))) {
ReleaseWriteLock(&avc->lock);
/* mapDV of zero means "never flushed"; just record the current version. */
if (hiszero(avc->mapDV)) {
hset(avc->mapDV, avc->m.DataVersion);
ReleaseWriteLock(&avc->lock);
AFS_STATCNT(osi_FlushPages);
hset(origDV, avc->m.DataVersion);
afs_Trace3(afs_iclSetp, CM_TRACE_FLUSHPAGES, ICL_TYPE_POINTER, avc,
ICL_TYPE_INT32, origDV.low, ICL_TYPE_INT32, avc->m.Length);
/* Drop the vcache lock across the (potentially blocking) VM flush. */
ReleaseWriteLock(&avc->lock);
osi_VM_FlushPages(avc, credp);
ObtainWriteLock(&avc->lock,88);
/* do this last, and to original version, since stores may occur
while executing above PUTPAGE call */
hset(avc->mapDV, origDV);
ReleaseWriteLock(&avc->lock);
afs_lock_t afs_ftf; /* flush text lock */
/* This call is supposed to flush all caches that might be invalidated
* by either a local write operation or a write operation done on
* another client. This call may be called repeatedly on the same
* version of a file, even while a file is being written, so it
* shouldn't do anything that would discard newly written data before
* it is written to the file system. */
/* osi_FlushText_really -- flush the text (executable-image) cache for vp,
 * recording the data version flushed in vp->flushDV so repeat calls for the
 * same version are cheap no-ops.  Serialized by the global afs_ftf lock.
 * NOTE(review): the GFS/DEC branch and several closing braces are elided in
 * this excerpt. */
void osi_FlushText_really(register struct vcache *vp)
afs_hyper_t fdv; /* version before which we'll flush */
AFS_STATCNT(osi_FlushText);
/* see if we've already flushed this data version */
if (hcmp(vp->m.DataVersion, vp->flushDV) <= 0) return;
void afs_gfs_FlushText();
afs_gfs_FlushText(vp);
MObtainWriteLock(&afs_ftf,317);
/* Snapshot the version now; only record it as flushed at the end if we
 * actually complete the flush. */
hset(fdv, vp->m.DataVersion);
/* why this disgusting code below?
* xuntext, called by xrele, doesn't notice when it is called
* with a freed text object. Sun continually calls xrele or xuntext
* without any locking, as long as VTEXT is set on the
* corresponding vnode.
* But, if the text object is locked when you check the VTEXT
* flag, several processes can wait in xuntext, waiting for the
* text lock; when the second one finally enters xuntext's
* critical region, the text object is already free, but the check
* was already done by xuntext's caller.
* Even worse, it turns out that xalloc locks the text object
* before reading or stating a file via the vnode layer. Thus, we
* could end up in getdcache, being asked to bring in a new
* version of a file, but the corresponding text object could be
* locked. We can't flush the text object without causing
* deadlock, so now we just don't try to lock the text object
* unless it is guaranteed to work. And we try to flush the text
* when we need to a bit more often at the vnode layer. Sun
* really blew the vm-cache flushing interface.
#if defined (AFS_HPUX_ENV)
if (vp->v.v_flag & VTEXT) {
if (vp->v.v_flag & VTEXT) { /* still has a text object? */
MReleaseWriteLock(&afs_ftf);
/* next do the stuff that need not check for deadlock problems */
/* finally, record that we've done it */
hset(vp->flushDV, fdv);
MReleaseWriteLock(&afs_ftf);
#endif /* AFS_DEC_ENV */
/* I don't really like using xinval() here, because it kills processes
* a bit aggressively. Previous incarnations of this functionality
* used to use xrele() instead of xinval, and didn't invoke
* cacheinval(). But they would panic. So it might be worth looking
* into some middle ground...
/* afs_gfs_FlushText -- GFS (DEC/Ultrix) variant of the text-cache flush:
 * invalidate the text object attached to vp's gnode, giving up early if
 * the text object is locked (taking it then risks deadlock — see the long
 * comment in osi_FlushText_really).  Serialized by afs_ftf.
 * NOTE(review): `gp` (the gnode) is declared on a line elided from this
 * excerpt, as are several closing braces. */
static void afs_gfs_FlushText(register struct vcache *vp)
afs_hyper_t fdv; /* version before which we'll flush */
register struct text *xp;
MObtainWriteLock(&afs_ftf,318);
hset(fdv, vp->m.DataVersion);
/* this happens frequently after cores are created. */
MReleaseWriteLock(&afs_ftf);
if (gp->g_flag & GTEXT) {
xp = (struct text *) gp->g_textp ;
/* if text object is locked, give up */
if (xp && (xp->x_flag & XLOCK)) {
MReleaseWriteLock(&afs_ftf);
if (gp->g_flag & GTEXT) { /* still has a text object? */
/* next do the stuff that need not check for deadlock problems */
/* maybe xinval(gp); here instead of above */
/* finally, record that we've done it */
hset(vp->flushDV, fdv);
MReleaseWriteLock(&afs_ftf);
#endif /* AFS_DEC_ENV */
#endif /* AFS_TEXT_ENV */
/* mask signals in afsds */
/* Per-platform signal management for AFS daemon threads.  On every platform
 * other than Linux 2.2+ these three routines are no-ops; the Linux bodies
 * are elided from this excerpt (presumably calls into osi_linux_* helpers
 * — confirm in osi_misc.c). */
void afs_osi_MaskSignals(void)
#ifdef AFS_LINUX22_ENV
/* unmask signals in rxk listener */
void afs_osi_UnmaskRxkSignals(void)
#ifdef AFS_LINUX22_ENV
/* register rxk listener proc info */
void afs_osi_RxkRegister(void)
#ifdef AFS_LINUX22_ENV
/* procedure for making our processes as invisible as we can */
/* afs_osi_Invisible -- mark the calling kernel process as a system process
 * (so it is skipped by process accounting, signals, swapping policies, etc.)
 * using whichever flag/API the platform provides.  On Linux this instead
 * masks signals via afs_osi_MaskSignals(). */
void afs_osi_Invisible(void)
#ifdef AFS_LINUX22_ENV
afs_osi_MaskSignals();
#elif defined(AFS_DEC_ENV)
u.u_procp->p_type |= SSYS;
#elif defined(AFS_SUN5_ENV)
curproc->p_flag |= SSYS;
#elif defined(AFS_HPUX101_ENV)
set_system_proc(u.u_procp);
#elif defined(AFS_DARWIN_ENV)
/* maybe call init_process instead? */
current_proc()->p_flag |= P_SYSTEM;
#elif defined(AFS_XBSD_ENV)
curproc->p_flag |= P_SYSTEM;
#elif defined(AFS_SGI_ENV)
AFS_STATCNT(osi_Invisible);
#ifndef AFS_LINUX20_ENV /* Linux version in osi_misc.c */
/* set the real time */
/* afs_osi_SetTime -- set the system clock to atv (typically to the time
 * obtained from an AFS server), one implementation arm per platform.
 * NOTE(review): this excerpt elides the opening #if arms (AIX32/SUN5/…)
 * that pair with the trailing #endif ladder, plus most braces; the visible
 * arms cannot be matched to their guards from here alone. */
void afs_osi_SetTime(osi_timeval_t *atv)
struct timestruc_t t;
t.tv_sec = atv->tv_sec;
t.tv_nsec = atv->tv_usec * 1000; /* timestruc_t carries nanoseconds */
ksettimer(&t); /* Was -> settimer(TIMEOFDAY, &t); */
* To get more than second resolution we can use adjtime. The problem
* is that the usecs from the server are wrong (by now) so it isn't
* worth complicating the following code.
sta.time = atv->tv_sec;
#if defined(AFS_SGI_ENV)
sta.time = atv->tv_sec;
#if defined(AFS_FBSD_ENV)
/* does not implement security features of kern_time.c:settime() */
struct timeval tv,delta;
timevalsub(&delta, &tv);
ts.tv_sec=atv->tv_sec;
ts.tv_nsec=atv->tv_usec * 1000;
set_timecounter(&ts);
/* Leases are timestamp-based; tell the lease code the clock jumped. */
(void) splsoftclock();
lease_updatetime(delta.tv_sec);
#if defined(AFS_DARWIN_ENV)
/* stolen from kern_time.c */
boottime.tv_sec += atv->tv_sec - time.tv_sec;
#if !defined(AFS_HPUX1122_ENV)
/* drop the setting of the clock for now. spl7 is not
t.tv_sec = atv->tv_sec;
t.tv_usec = atv->tv_usec;
s = spl7(); time = t; (void) splx(s);
s = splclock(); time = *atv; (void) splx(s);
logtchg(atv->tv_sec);
#endif /* AFS_DARWIN_ENV */
#endif /* AFS_FBSD_ENV */
#endif /* AFS_SGI_ENV */
#endif /* AFS_SUN55_ENV */
#endif /* AFS_SUN5_ENV */
#endif /* AFS_AIX32_ENV */
AFS_STATCNT(osi_SetTime);
#endif /* AFS_LINUX20_ENV */
/* afs_osi_Alloc -- kernel memory allocator wrapper.  Returns &memZero for
 * zero-byte requests so NULL always means failure; panics on allocation
 * failure (so callers never see NULL in practice).  Also maintains the
 * outstanding-allocation statistics counters.
 * NOTE(review): `size` used in the AFS_KALLOC call is computed on a line
 * elided from this excerpt — presumably derived from x; confirm. */
void *afs_osi_Alloc(size_t x)
register struct osimem *tm = NULL;
AFS_STATCNT(osi_Alloc);
/* 0-length allocs may return NULL ptr from AFS_KALLOC, so we special-case
things so that NULL returned iff an error occurred */
if (x == 0) return &memZero;
AFS_STATS(afs_stats_cmperf.OutStandingAllocs++);
AFS_STATS(afs_stats_cmperf.OutStandingMemUsage += x);
#ifdef AFS_LINUX20_ENV
return osi_linux_alloc(x, 1);
tm = (struct osimem *) AFS_KALLOC(size);
osi_Panic("osi_Alloc: Couldn't allocate %d bytes; out of memory!\n",
#if defined(AFS_SUN_ENV) || defined(AFS_SGI_ENV)
/* afs_osi_Alloc_NoSleep -- like afs_osi_Alloc but uses the non-blocking
 * allocator, for callers that cannot sleep; may return NULL on failure
 * instead of panicking.  Same zero-length (&memZero) convention and the
 * same statistics bookkeeping as afs_osi_Alloc. */
void *afs_osi_Alloc_NoSleep(size_t x)
register struct osimem *tm;
AFS_STATCNT(osi_Alloc);
/* 0-length allocs may return NULL ptr from AFS_KALLOC, so we special-case
things so that NULL returned iff an error occurred */
if (x == 0) return &memZero;
AFS_STATS(afs_stats_cmperf.OutStandingAllocs++);
AFS_STATS(afs_stats_cmperf.OutStandingMemUsage += x);
tm = (struct osimem *) AFS_KALLOC_NOSLEEP(size);
#endif /* SUN || SGI */
/* afs_osi_Free -- release memory obtained from afs_osi_Alloc.  The caller
 * must pass the original allocation size (asize) so the statistics can be
 * reversed and so allocators that require the size can free correctly.
 * The zero-length sentinel &memZero is recognized and ignored. */
void afs_osi_Free(void *x, size_t asize)
AFS_STATCNT(osi_Free);
if (x == &memZero) return; /* check for putting memZero back */
AFS_STATS(afs_stats_cmperf.OutStandingAllocs--);
AFS_STATS(afs_stats_cmperf.OutStandingMemUsage -= asize);
#if defined(AFS_LINUX20_ENV)
AFS_KFREE((struct osimem *)x, asize);
/* afs_osi_FreeStr -- free a NUL-terminated string allocated with
 * afs_osi_Alloc(strlen(s) + 1).  x must not be NULL (strlen would fault). */
void afs_osi_FreeStr(char *x)
afs_osi_Free(x, strlen(x) + 1);
/* ? is it moderately likely that there are dirty VM pages associated with
* Prereqs: avc must be write-locked
* System Dependencies: - *must* support each type of system for which
* memory mapped files are supported, even if all
* it does is return TRUE;
* NB: this routine should err on the side of caution for ProcessFS to work
* correctly (or at least, not to introduce worse bugs than already exist)
/* osi_VMDirty_p -- heuristic: could this vcache have dirty VM pages?
 * Returns 0 when clearly clean; platform-specific scans otherwise. */
int osi_VMDirty_p(struct vcache *avc)
if (avc->execsOrWriters <= 0)
return 0; /* can't be many dirty pages here, I guess */
#if defined (AFS_AIX32_ENV)
/* because of the level of hardware involvement with VM and all the
* warnings about "This routine must be called at VMM interrupt
* level", I thought it would be safest to disable interrupts while
* looking at the software page fault table. */
/* convert vm handle into index into array: I think that stoinio is
* always zero... Look into this XXX */
#define VMHASH(handle) ( \
( ((handle) & ~vmker.stoinio) \
^ ((((handle) & ~vmker.stoinio) & vmker.stoimask) << vmker.stoihash) \
unsigned int pagef, pri, index, next;
index = VMHASH(avc->vmh);
if (scb_valid(index)) { /* could almost be an ASSERT */
pri = disable_ints();
/* NOTE(review): pagef is unsigned, so "pagef >= 0" is always true — the
 * loop can only exit via whatever end-of-list value pft_sidfwd returns
 * matching something else, or not at all.  Verify the sentinel and make
 * pagef signed (or compare against the explicit sentinel). */
for (pagef = scb_sidlist(index); pagef >= 0; pagef = next) {
next = pft_sidfwd(pagef);
if (pft_modbit(pagef)) { /* has page frame been modified? */
#endif /* AFS_AIX32_ENV */
#if defined (AFS_SUN_ENV)
/* Sun: walk the vnode's page list looking for dirty pages (loop body and
 * the dirty-test are elided from this excerpt). */
if (avc->states & CMAPPED) {
for (pg = avc->v.v_s.v_Pages ; pg ; pg = pg->p_vpnext) {
* Solaris osi_ReleaseVM should not drop and re-obtain the vcache entry lock.
* This leads to bad races when osi_ReleaseVM() is called from
* afs_InvalidateAllSegments().
* We can do this because Solaris osi_VM_Truncate() doesn't care whether the
* vcache entry lock is held or not.
* For other platforms, in some cases osi_VM_Truncate() doesn't care, but
* there may be cases where it does care. If so, it would be good to fix
* them so they don't care. Until then, we assume the worst.
* Locking: the vcache entry lock is held. It is dropped and re-obtained.
/* osi_ReleaseVM -- throw away all VM state for avc by truncating its VM
 * object to length 0.  On Solaris this runs with the vcache lock held;
 * elsewhere the lock is dropped around the truncate and re-obtained
 * (the platform #if/#else lines are elided from this excerpt). */
void osi_ReleaseVM(struct vcache *avc, struct AFS_UCRED *acred)
osi_VM_Truncate(avc, 0, acred);
ReleaseWriteLock(&avc->lock);
osi_VM_Truncate(avc, 0, acred);
ObtainWriteLock(&avc->lock, 80);
/* shutdown_osi -- shutdown hook for this module; on a cold shutdown,
 * re-initialize the flush-text lock so a subsequent warm start sees it
 * in a clean state. */
void shutdown_osi(void)
AFS_STATCNT(shutdown_osi);
if (afs_cold_shutdown) {
LOCK_INIT(&afs_ftf, "afs_ftf");
/* afs_osi_suser -- return nonzero if credp carries superuser privilege;
 * delegates to the platform's privilege check (only the Solaris arm is
 * visible in this excerpt). */
int afs_osi_suser(void *credp)
#if defined(AFS_SUN5_ENV)
return afs_suser(credp);
/* afs_osi_TraverseProcTable() - Walk through the systems process
* table, calling afs_GCPAGs_perproc_func() for each process.
/* Solaris: iterate the kernel's active-process list (practive).
 * NOTE(review): no process-table lock is visibly taken here — confirm the
 * caller holds whatever Solaris requires for walking practive. */
#if defined(AFS_SUN5_ENV)
void afs_osi_TraverseProcTable(void)
for (prp = practive; prp != NULL; prp = prp->p_next) {
afs_GCPAGs_perproc_func(prp);
#if defined(AFS_HPUX_ENV)
* NOTE: h/proc_private.h gives the process table locking rules
* It indicates that access to p_cred must be protected by
* mp_mtproc_unlock(p);
* The code in sys/pm_prot.c uses pcred_lock() to protect access to
* the process creds, and uses mp_mtproc_lock() only for audit-related
* changes. To be safe, we use both.
/* HP-UX: walk the active-process chain under activeproc_lock + sched_lock,
 * invoking afs_GCPAGs_perproc_func on each entry.  p_fandx == 0 marks the
 * end of the chain (endchain handling is partly elided in this excerpt). */
void afs_osi_TraverseProcTable(void)
MP_SPINLOCK(activeproc_lock);
MP_SPINLOCK(sched_lock);
* Instead of iterating through all of proc[], traverse only
* the list of active processes. As an example of this,
* see foreach_process() in sys/vm_sched.c.
* We hold the locks for the entire scan in order to get a
* consistent view of the current set of creds.
for(p = proc; endchain == 0; p = &proc[p->p_fandx]) {
if (p->p_fandx == 0) {
afs_GCPAGs_perproc_func(p);
MP_SPINUNLOCK(sched_lock);
MP_SPINUNLOCK(activeproc_lock);
#if defined(AFS_SGI_ENV)
/* TODO: Fix this later. */
/* SGI 6.5: procscan() callback is a stub (body elided/empty pending fix). */
static int SGI_ProcScanFunc(void *p, void *arg, int mode)
#else /* AFS_SGI65_ENV */
/* Pre-6.5 SGI: the per-process callback invoked by procscan(); `arg` is
 * the function pointer passed in by afs_osi_TraverseProcTable below. */
static int SGI_ProcScanFunc(proc_t *p, void *arg, int mode)
afs_int32 (*perproc_func)(struct proc *) = arg;
/* we pass in the function pointer for arg,
* mode ==0 for startup call, ==1 for each valid proc,
* and ==2 for terminate call.
code = perproc_func(p);
#endif /* AFS_SGI65_ENV */
/* SGI: let the kernel drive the iteration via procscan(). */
void afs_osi_TraverseProcTable(void)
procscan(SGI_ProcScanFunc, afs_GCPAGs_perproc_func);
#endif /* AFS_SGI_ENV */
#if defined(AFS_AIX_ENV)
#define max_proc v.ve_proc
/* AIX: walk the proc table by raw byte stride (afs_gcpags_procsize), since
 * the running kernel's struct proc may be larger than the one we compiled
 * against.  Skips SNONE/SIDL/SEXIT slots; sanity-checks pid-vs-slot and
 * nice range, disabling GC-PAGs entirely (afs_gcpags = error code) if the
 * table looks inconsistent.  Pre-5.1 kernels require proc_tbl_lock. */
void afs_osi_TraverseProcTable(void)
* For binary compatibility, on AIX we need to be careful to use the
* proper size of a struct proc, even if it is different from what
* we were compiled with.
if (!afs_gcpags_procsize)
#ifndef AFS_AIX51_ENV
simple_lock(&proc_tbl_lock);
for (p = (struct proc *)v.vb_proc, i = 0;
p = (struct proc *)((char *)p + afs_gcpags_procsize), i++) {
if (p->p_pvprocp->pv_stat == SNONE)
if (p->p_pvprocp->pv_stat == SIDL)
if (p->p_pvprocp->pv_stat == SEXIT)
if (p->p_stat == SNONE)
if (p->p_stat == SIDL)
if (p->p_stat == SEXIT)
/* sanity check: the pid should map back to this table slot */
if (PROCMASK(p->p_pid) != i) {
afs_gcpags = AFS_GCPAGS_EPIDCHECK;
if ((p->p_nice < P_NICE_MIN) || (P_NICE_MAX < p->p_nice)) {
afs_gcpags = AFS_GCPAGS_ENICECHECK;
afs_GCPAGs_perproc_func(p);
#ifndef AFS_AIX51_ENV
simple_unlock(&proc_tbl_lock);
#if defined(AFS_OSF_ENV)
/* OSF/1: scan the pid table, calling the per-process hook for each live
 * slot (pe_proc != PROC_NULL). */
void afs_osi_TraverseProcTable(void)
struct pid_entry *pe;
#define pidNPID (pidtab + npid)
for (pe = pidtab; pe < pidNPID; ++pe) {
if (pe->pe_proc != PROC_NULL)
afs_GCPAGs_perproc_func(pe->pe_proc);
#if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
/* Darwin/FreeBSD: walk allproc, skipping embryonic (SIDL), zombie (SZOMB)
 * and system (P_SYSTEM) processes.  NOTE(review): allproc_lock is not
 * visibly held here — confirm the locking requirement for this kernel. */
void afs_osi_TraverseProcTable(void)
LIST_FOREACH(p, &allproc, p_list) {
if (p->p_stat == SIDL)
if (p->p_stat == SZOMB)
if (p->p_flag & P_SYSTEM)
afs_GCPAGs_perproc_func(p);
#if defined(AFS_LINUX22_ENV)
/* Linux 2.2+: iterate the task list under tasklist_lock (when the kernel
 * exports it), using for_each_process() on newer kernels and the older
 * for_each_task() otherwise; zombies and pid 0 are skipped. */
void afs_osi_TraverseProcTable()
struct task_struct *p;
#ifdef EXPORTED_TASKLIST_LOCK
read_lock(&tasklist_lock);
#ifdef DEFINED_FOR_EACH_PROCESS
for_each_process(p) if (p->pid) {
if (p->state & TASK_ZOMBIE)
afs_GCPAGs_perproc_func(p);
for_each_task(p) if (p->pid) {
if (p->state & TASK_ZOMBIE)
afs_GCPAGs_perproc_func(p);
#ifdef EXPORTED_TASKLIST_LOCK
read_unlock(&tasklist_lock);
/* return a pointer (sometimes a static copy ) to the cred for a
* subsequent calls may overwrite the previously returned value.
/* afs_osi_proc2cred -- per-platform: map a process handle to its credential.
 * NOTE(review): several arms below return the address of a function-local
 * `static` cred buffer, so the result is only valid until the next call and
 * is not safe under concurrency — callers must copy what they need. */
#if defined(AFS_SGI65_ENV)
const struct AFS_UCRED *afs_osi_proc2cred(AFS_PROC *p)
#elif defined(AFS_HPUX_ENV)
const struct AFS_UCRED *afs_osi_proc2cred(AFS_PROC *p)
* Cannot use afs_warnuser() here, as the code path
* eventually wants to grab sched_lock, which is
#elif defined(AFS_AIX_ENV)
/* GLOBAL DECLARATIONS */
* LOCKS: the caller must do
* simple_lock(&proc_tbl_lock);
* simple_unlock(&proc_tbl_lock);
* around calls to this function.
/* AIX: the target process's user area lives in *its* address space, so we
 * must attach that segment (vm_att) and pin it (xmattach) before reading
 * U_cred, copying the cred into a local static buffer, then detach.
 * See the step-by-step recipe in the comment below. */
const struct AFS_UCRED *afs_osi_proc2cred(AFS_PROC *pproc)
struct AFS_UCRED *pcred = 0;
* pointer to process user structure valid in *our*
* The user structure for a process is stored in the user
* address space (as distinct from the kernel address
* space), and so to refer to the user structure of a
* different process we must employ special measures.
* I followed the example used in the AIX getproc() system
* call in bos/kernel/proc/getproc.c
struct user *xmem_userp;
struct xmem dp; /* ptr to xmem descriptor */
int xm; /* xmem result */
* The process private segment in which the user
* area is located may disappear. We need to increment
* its use count. Therefore we
* - get the proc_tbl_lock to hold the segment.
* - get the p_lock to lockout vm_cleardata.
* - vm_att to load the segment register (no check)
* - xmattach to bump its use count.
* - release the p_lock.
* - release the proc_tbl_lock.
* - do whatever we need.
* - xmdetach to decrement the use count.
* - vm_det to free the segment register (no check)
/* simple_lock(&proc_tbl_lock); */
if (pproc->p_adspace != NULLSEGVAL) {
simple_lock(&pproc->p_pvprocp->pv_lock);
simple_lock(&pproc->p_lock);
if (pproc->p_threadcount &&
pproc->p_pvprocp->pv_threadlist) {
pproc->p_threadlist) {
* arbitrarily pick the first thread in pproc
struct thread *pproc_thread =
pproc->p_pvprocp->pv_threadlist;
* location of 'struct user' in pproc's
struct user *pproc_userp =
pproc_thread->t_userp;
* create a pointer valid in my own address space
(struct user *)vm_att(pproc->p_adspace,
dp.aspace_id = XMEM_INVAL;
xm = xmattach(xmem_userp,
simple_unlock(&pproc->p_pvprocp->pv_lock);
simple_unlock(&pproc->p_lock);
/* simple_unlock(&proc_tbl_lock); */
if (xm == XMEM_SUCC) {
/* static buffer: result is overwritten by the next call (not thread-safe) */
static struct AFS_UCRED cred;
* What locking should we use to protect access to the user
* area? If needed also change the code in AIX/osi_groups.c.
/* copy cred to local address space */
cred = *xmem_userp->U_cred;
vm_det((void *)xmem_userp);
#elif defined(AFS_OSF_ENV)
/* OSF/1: only hand back a cred for processes in a stable state
 * (sleeping, runnable, or stopped). */
const struct AFS_UCRED *afs_osi_proc2cred(AFS_PROC *pr)
struct AFS_UCRED *rv=NULL;
if((pr->p_stat == SSLEEP) ||
(pr->p_stat == SRUN) ||
(pr->p_stat == SSTOP))
#elif defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
/* Darwin/FreeBSD: copy uid/groups out of the process ucred into a static
 * buffer.  NOTE(review): the static `cr` makes the return value valid only
 * until the next call — not thread-safe. */
const struct AFS_UCRED *afs_osi_proc2cred(AFS_PROC *pr)
struct AFS_UCRED *rv=NULL;
static struct AFS_UCRED cr;
if((pr->p_stat == SSLEEP) ||
(pr->p_stat == SRUN) ||
(pr->p_stat == SSTOP)) {
cr.cr_uid=pr->p_cred->pc_ucred->cr_uid;
cr.cr_ngroups=pr->p_cred->pc_ucred->cr_ngroups;
memcpy(cr.cr_groups, pr->p_cred->pc_ucred->cr_groups, NGROUPS *
#elif defined(AFS_LINUX22_ENV)
/* Linux 2.2+: synthesize an AFS_UCRED from the task_struct fields for
 * tasks in a stable scheduler state; same static-buffer caveat as above. */
const struct AFS_UCRED *afs_osi_proc2cred(AFS_PROC *pr)
struct AFS_UCRED *rv=NULL;
static struct AFS_UCRED cr;
if ((pr->state == TASK_RUNNING) ||
(pr->state == TASK_INTERRUPTIBLE) ||
(pr->state == TASK_UNINTERRUPTIBLE) ||
(pr->state == TASK_STOPPED)) {
cr.cr_ngroups=pr->ngroups;
memcpy(cr.cr_groups, pr->groups, NGROUPS * sizeof(gid_t));
/* Fallback arm for all remaining platforms (body elided in this excerpt). */
const struct AFS_UCRED *afs_osi_proc2cred(AFS_PROC *pr)
struct AFS_UCRED *rv=NULL;
#endif /* AFS_GCPAGS */