2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "afs/param.h"
16 #include "afs/sysincludes.h" /* Standard vendor system headers */
17 #include "afsincludes.h" /* Afs-based standard headers */
18 #include "afs/afs_stats.h" /* afs statistics */
20 #include <sys/adspace.h> /* for vm_att(), vm_det() */
23 static char memZero; /* address of 0 bytes for kmem_alloc */
29 /* osi_Init -- do once per kernel installation initialization.
30 * -- On Solaris this is called from modload initialization.
31 * -- On AIX called from afs_config.
32 * -- On HP called from afsc_link.
33 * -- On SGI called from afs_init. */
/* NOTE(review): this excerpt is fragmentary -- the osi_Init() signature and
 * several interior lines (braces, #else arms) are missing.  What remains:
 * per-platform initialization of the AFS global lock, and construction of
 * the kernel credential stored in afs_osi_credp. */
36 lock_t afs_event_lock;
43 struct AFS_UCRED *afs_osi_credp;
/* Idempotence guard -- presumably returns early on repeat calls (the return
 * line is missing from this excerpt; confirm against the full file). */
49 if (once++ > 0)		/* just in case */
51 #if defined(AFS_HPUX_ENV)
53 #else /* AFS_HPUX_ENV */
54 #if defined(AFS_GLOBAL_SUNLOCK)
/* One global-lock initializer per platform; only one arm compiles in. */
55 #if defined(AFS_SGI62_ENV)
56 mutex_init(&afs_global_lock, MUTEX_DEFAULT, "afs_global_lock");
57 #elif defined(AFS_OSF_ENV)
58 usimple_lock_init(&afs_global_lock);
59 afs_global_owner = (thread_t) 0;
60 #elif defined(AFS_FBSD50_ENV)
61 mtx_init(&afs_global_mtx, "AFS global lock", NULL, MTX_DEF);
62 #elif defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
63 lockinit(&afs_global_lock, PLOCK, "afs global lock", 0, 0);
65 #elif defined(AFS_AIX41_ENV)
66 lock_alloc((void *)&afs_global_lock, LOCK_ALLOC_PIN, 1, 1);
67 simple_lock_init((void *)&afs_global_lock);
68 #elif !defined(AFS_LINUX22_ENV)
69 /* Linux initialization in osi directory. Should move the others. */
70 mutex_init(&afs_global_lock, "afs_global_lock", MUTEX_DEFAULT, NULL);
72 /* afs_rxglobal_lock is initialized in rx_Init. */
73 #endif /* AFS_GLOBAL_SUNLOCK */
74 #endif /* AFS_HPUX_ENV */
/* Build the credential the cache manager uses for its own I/O.  On *BSD a
 * real cred must come from the current process (crdup); elsewhere a static
 * zeroed AFS_UCRED is held (crhold) so it is never freed. */
76 if (!afs_osicred_initialized) {
77 #if defined(AFS_XBSD_ENV)
78 /* Can't just invent one, must use crget() because of mutex */
79 afs_osi_credp = crdup(osi_curcred());
81 memset(&afs_osi_cred, 0, sizeof(struct AFS_UCRED));
82 #if defined(AFS_LINUX26_ENV)
83 afs_osi_cred.cr_group_info = groups_alloc(0);
85 crhold(&afs_osi_cred);	/* don't let it evaporate */
86 afs_osi_credp = &afs_osi_cred;
88 afs_osicred_initialized = 1;
91 osi_flid.fl_pid = osi_flid.fl_sysid = 0;
94 init_et_to_sys_error();
/* osi_Active -- return nonzero if the vcache is still "in use": open by
 * someone, memory-mapped, or (on some platforms) an executing text image.
 * NOTE(review): fragment -- the return-type line, #else arms, returns and
 * closing braces are missing from this excerpt.
 * NOTE(review): "(AFS_LINUX20_ENV)" in the #if below lacks defined(); if
 * that macro is defined to an empty token the directive will not
 * preprocess -- verify against the full tree. */
98 osi_Active(register struct vcache *avc)
100 AFS_STATCNT(osi_Active);
101 #if defined(AFS_SUN_ENV) || defined(AFS_AIX_ENV) || defined(AFS_OSF_ENV) || defined(AFS_SUN5_ENV) || (AFS_LINUX20_ENV) || defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
102 if ((avc->opens > 0) || (avc->states & CMAPPED))
103 return 1;		/* XXX: Warning, verify this XXX */
104 #elif defined(AFS_MACH_ENV)
106 || ((avc->v.v_flag & VTEXT) && !inode_uncache_try(avc)))
108 #elif defined(AFS_SGI_ENV)
109 if ((avc->opens > 0) || AFS_VN_MAPPED(AFSTOV(avc)))
112 if (avc->opens > 0 || (AFSTOV(avc)->v_flag & VTEXT))
118 /* this call, unlike osi_FlushText, is supposed to discard caches that may
119 contain invalid information if a file is written remotely, but that may
120 contain valid information that needs to be written back if the file is
121 being written locally. It doesn't subsume osi_FlushText, since the latter
122 function may be needed to flush caches that are invalidated by local writes.
124 avc->pvnLock is already held, avc->lock is guaranteed not to be held (by
/* NOTE(review): fragment -- several lines (returns, brace closes, the
 * origDV declaration, the AFS_GUNLOCK/AFS_GLOCK pair around the VM call)
 * are missing from this excerpt. */
128 osi_FlushPages(register struct vcache *avc, struct AFS_UCRED *credp)
/* Cheap check under the read lock first: skip the flush if this data
 * version was already purged, or if we are the writer (flushing could
 * discard our own dirty pages). */
131 ObtainReadLock(&avc->lock);
132 /* If we've already purged this version, or if we're the ones
133 * writing this version, don't flush it (could lose the
134 * data we're writing). */
135 if ((hcmp((avc->m.DataVersion), (avc->mapDV)) <= 0)
136 || ((avc->execsOrWriters > 0) && afs_DirtyPages(avc))) {
137 ReleaseReadLock(&avc->lock);
/* Upgrade to the write lock and re-check -- state may have changed while
 * no lock was held. */
140 ReleaseReadLock(&avc->lock);
141 ObtainWriteLock(&avc->lock, 10);
143 if ((hcmp((avc->m.DataVersion), (avc->mapDV)) <= 0)
144 || ((avc->execsOrWriters > 0) && afs_DirtyPages(avc))) {
145 ReleaseWriteLock(&avc->lock);
148 if (hiszero(avc->mapDV)) {
149 hset(avc->mapDV, avc->m.DataVersion);
150 ReleaseWriteLock(&avc->lock);
154 AFS_STATCNT(osi_FlushPages);
/* Remember the version we are flushing; stores during the VM flush may
 * bump DataVersion, and mapDV must record the pre-flush version. */
155 hset(origDV, avc->m.DataVersion);
156 afs_Trace3(afs_iclSetp, CM_TRACE_FLUSHPAGES, ICL_TYPE_POINTER, avc,
157 ICL_TYPE_INT32, origDV.low, ICL_TYPE_INT32, avc->m.Length);
/* Drop the vcache lock across the (potentially blocking) VM flush. */
159 ReleaseWriteLock(&avc->lock);
161 osi_VM_FlushPages(avc, credp);
163 ObtainWriteLock(&avc->lock, 88);
165 /* do this last, and to original version, since stores may occur
166 * while executing above PUTPAGE call */
167 hset(avc->mapDV, origDV);
168 ReleaseWriteLock(&avc->lock);
/* Serializes all text-cache flushing below. */
171 afs_lock_t afs_ftf;		/* flush text lock */
175 /* This call is supposed to flush all caches that might be invalidated
176 * by either a local write operation or a write operation done on
177 * another client. This call may be called repeatedly on the same
178 * version of a file, even while a file is being written, so it
179 * shouldn't do anything that would discard newly written data before
180 * it is written to the file system. */
/* NOTE(review): fragment -- return statements, brace closes, and the
 * platform #if structure between the visible lines are missing. */
183 osi_FlushText_really(register struct vcache *vp)
185 afs_hyper_t fdv;		/* version before which we'll flush */
187 AFS_STATCNT(osi_FlushText);
188 /* see if we've already flushed this data version */
189 if (hcmp(vp->m.DataVersion, vp->flushDV) <= 0)
/* Presumably the GFS (DEC) path delegates to afs_gfs_FlushText -- confirm
 * against the full file. */
194 void afs_gfs_FlushText();
195 afs_gfs_FlushText(vp);
200 MObtainWriteLock(&afs_ftf, 317);
201 hset(fdv, vp->m.DataVersion);
203 /* why this disgusting code below?
204 * xuntext, called by xrele, doesn't notice when it is called
205 * with a freed text object. Sun continually calls xrele or xuntext
206 * without any locking, as long as VTEXT is set on the
207 * corresponding vnode.
208 * But, if the text object is locked when you check the VTEXT
209 * flag, several processes can wait in xuntext, waiting for the
210 * text lock; when the second one finally enters xuntext's
211 * critical region, the text object is already free, but the check
212 * was already done by xuntext's caller.
213 * Even worse, it turns out that xalloc locks the text object
214 * before reading or stating a file via the vnode layer. Thus, we
215 * could end up in getdcache, being asked to bring in a new
216 * version of a file, but the corresponding text object could be
217 * locked. We can't flush the text object without causing
218 * deadlock, so now we just don't try to lock the text object
219 * unless it is guaranteed to work. And we try to flush the text
220 * when we need to a bit more often at the vnode layer. Sun
221 * really blew the vm-cache flushing interface.
224 #if defined (AFS_HPUX_ENV)
225 if (vp->v.v_flag & VTEXT) {
/* Re-check after the (missing) flush attempt: bail while still holding
 * nothing discarded if a text object remains attached. */
228 if (vp->v.v_flag & VTEXT) {	/* still has a text object? */
229 MReleaseWriteLock(&afs_ftf);
235 /* next do the stuff that need not check for deadlock problems */
238 /* finally, record that we've done it */
239 hset(vp->flushDV, fdv);
240 MReleaseWriteLock(&afs_ftf);
242 #endif /* AFS_DEC_ENV */
246 /* I don't really like using xinval() here, because it kills processes
247 * a bit aggressively. Previous incarnations of this functionality
248 * used to use xrele() instead of xinval, and didn't invoke
249 * cacheinval(). But they would panic. So it might be worth looking
250 * into some middle ground...
/* GFS (DEC/Ultrix) variant of the text flush.  NOTE(review): fragment --
 * the gnode lookup that sets `gp`, returns, and brace closes are missing
 * from this excerpt. */
253 afs_gfs_FlushText(register struct vcache *vp)
255 afs_hyper_t fdv;		/* version before which we'll flush */
256 register struct text *xp;
259 MObtainWriteLock(&afs_ftf, 318);
260 hset(fdv, vp->m.DataVersion);
264 /* this happens frequently after cores are created. */
265 MReleaseWriteLock(&afs_ftf);
269 if (gp->g_flag & GTEXT) {
271 xp = (struct text *)gp->g_textp;
272 /* if text object is locked, give up */
/* Deadlock avoidance: never wait on a locked text object (see the long
 * rationale above osi_FlushText_really). */
273 if (xp && (xp->x_flag & XLOCK)) {
274 MReleaseWriteLock(&afs_ftf);
280 if (gp->g_flag & GTEXT) {	/* still has a text object? */
285 /* next do the stuff that need not check for deadlock problems */
286 /* maybe xinval(gp); here instead of above */
289 /* finally, record that we've done it */
290 hset(vp->flushDV, fdv);
292 MReleaseWriteLock(&afs_ftf);
294 #endif /* AFS_DEC_ENV */
296 #endif /* AFS_TEXT_ENV */
/* Group of small per-platform daemon-process helpers.  NOTE(review):
 * fragment -- return types, bodies of the Linux arms, and closing braces
 * are missing from this excerpt. */
298 /* mask signals in afsds */
300 afs_osi_MaskSignals(void)
302 #ifdef AFS_LINUX22_ENV
307 /* unmask signals in rxk listener */
309 afs_osi_UnmaskRxkSignals(void)
313 /* register rxk listener proc info */
315 afs_osi_RxkRegister(void)
317 #ifdef AFS_LINUX22_ENV
322 /* procedure for making our processes as invisible as we can */
/* Marks the calling process as a system process (hidden from ps-style
 * accounting) using whatever flag the platform provides. */
324 afs_osi_Invisible(void)
326 #ifdef AFS_LINUX22_ENV
327 afs_osi_MaskSignals();
328 #elif defined(AFS_DEC_ENV)
329 u.u_procp->p_type |= SSYS;
330 #elif defined(AFS_SUN5_ENV)
331 curproc->p_flag |= SSYS;
332 #elif defined(AFS_HPUX101_ENV) && !defined(AFS_HPUX1123_ENV)
333 set_system_proc(u.u_procp);
334 #elif defined(AFS_DARWIN_ENV)
335 /* maybe call init_process instead? */
336 current_proc()->p_flag |= P_SYSTEM;
337 #elif defined(AFS_XBSD_ENV)
338 curproc->p_flag |= P_SYSTEM;
339 #elif defined(AFS_SGI_ENV)
343 AFS_STATCNT(osi_Invisible);
347 #if !defined(AFS_LINUX20_ENV) && !defined(AFS_FBSD_ENV)
348 /* set the real time */
/* Sets the system clock to the AFS-server-supplied time, one mechanism
 * per platform.  NOTE(review): heavily fragmented -- the Solaris/SGI
 * stime-style calls, the Darwin splclock/splx bracketing, and the generic
 * settimeofday path are mostly missing from this excerpt. */
350 afs_osi_SetTime(osi_timeval_t * atv)
352 #if defined(AFS_AIX32_ENV)
353 struct timestruc_t t;
355 t.tv_sec = atv->tv_sec;
356 t.tv_nsec = atv->tv_usec * 1000;
357 ksettimer(&t);		/* Was -> settimer(TIMEOFDAY, &t); */
358 #elif defined(AFS_SUN55_ENV)
360 #elif defined(AFS_SUN5_ENV)
362 * To get more than second resolution we can use adjtime. The problem
363 * is that the usecs from the server are wrong (by now) so it isn't
364 * worth complicating the following code.
370 sta.time = atv->tv_sec;
373 #elif defined(AFS_SGI_ENV)
379 sta.time = atv->tv_sec;
382 #elif defined(AFS_DARWIN_ENV)
387 /* stolen from kern_time.c */
/* Keep boottime consistent so uptime reporting is unaffected by the jump. */
389 boottime.tv_sec += atv->tv_sec - time.tv_sec;
393 #if !defined(AFS_HPUX1122_ENV)
394 /* drop the setting of the clock for now. spl7 is not
399 t.tv_sec = atv->tv_sec;
400 t.tv_usec = atv->tv_usec;
417 logtchg(atv->tv_sec);
419 #endif /* AFS_DARWIN_ENV */
420 AFS_STATCNT(osi_SetTime);
422 #endif /* AFS_LINUX20_ENV */
/* afs_osi_Alloc -- sleeping kernel allocator wrapper with allocation
 * statistics.  NOTE(review): fragment -- the zero-length special case
 * (returning &memZero, per the comment) and the final return are missing
 * from this excerpt. */
426 afs_osi_Alloc(size_t x)
428 register struct osimem *tm = NULL;
431 AFS_STATCNT(osi_Alloc);
432 /* 0-length allocs may return NULL ptr from AFS_KALLOC, so we special-case
433 * things so that NULL returned iff an error occurred */
437 AFS_STATS(afs_stats_cmperf.OutStandingAllocs++);
438 AFS_STATS(afs_stats_cmperf.OutStandingMemUsage += x);
439 #ifdef AFS_LINUX20_ENV
440 return osi_linux_alloc(x, 1);
441 #elif defined(AFS_FBSD_ENV)
442 return osi_fbsd_alloc(x, 1);
445 tm = (struct osimem *)AFS_KALLOC(size);
/* Panics rather than returning NULL: callers assume success.
 * NOTE(review): %d with a size_t-derived value -- likely predates %zu. */
448 osi_Panic("osi_Alloc: Couldn't allocate %d bytes; out of memory!\n",
455 #if defined(AFS_SUN_ENV) || defined(AFS_SGI_ENV)
/* Non-blocking variant of afs_osi_Alloc (Sun/SGI only): may return NULL
 * instead of sleeping.  NOTE(review): fragment -- the zero-length case and
 * return are missing from this excerpt. */
458 afs_osi_Alloc_NoSleep(size_t x)
460 register struct osimem *tm;
463 AFS_STATCNT(osi_Alloc);
464 /* 0-length allocs may return NULL ptr from AFS_KALLOC, so we special-case
465 * things so that NULL returned iff an error occurred */
470 AFS_STATS(afs_stats_cmperf.OutStandingAllocs++);
471 AFS_STATS(afs_stats_cmperf.OutStandingMemUsage += x);
472 tm = (struct osimem *)AFS_KALLOC_NOSLEEP(size);
476 #endif /* SUN || SGI */
/* afs_osi_Free -- release memory from afs_osi_Alloc; asize must match the
 * original request.  NOTE(review): fragment -- the &memZero check guarding
 * the early return and the platform free calls are partially missing. */
479 afs_osi_Free(void *x, size_t asize)
481 AFS_STATCNT(osi_Free);
482 return;		/* check for putting memZero back */
485 AFS_STATS(afs_stats_cmperf.OutStandingAllocs--);
486 AFS_STATS(afs_stats_cmperf.OutStandingMemUsage -= asize);
487 #if defined(AFS_LINUX20_ENV)
489 #elif defined(AFS_FBSD_ENV)
492 AFS_KFREE((struct osimem *)x, asize);
/* Convenience: free a NUL-terminated string allocated via afs_osi_Alloc,
 * computing its size from strlen.  Undefined for x == NULL.
 * (Fragment: braces missing from this excerpt.) */
497 afs_osi_FreeStr(char *x)
499 afs_osi_Free(x, strlen(x) + 1);
502 /* ? is it moderately likely that there are dirty VM pages associated with
505 * Prereqs: avc must be write-locked
507 * System Dependencies: - *must* support each type of system for which
508 * memory mapped files are supported, even if all
509 * it does is return TRUE;
511 * NB: this routine should err on the side of caution for ProcessFS to work
512 * correctly (or at least, not to introduce worse bugs than already exist)
/* NOTE(review): fragment -- return statements, interrupt re-enable, and
 * most per-platform tails are missing from this excerpt. */
516 osi_VMDirty_p(struct vcache *avc)
/* No executors or writers => assume no dirty pages; cheap early out. */
520 if (avc->execsOrWriters <= 0)
521 return 0;		/* can't be many dirty pages here, I guess */
523 #if defined (AFS_AIX32_ENV)
525 /* because of the level of hardware involvment with VM and all the
526 * warnings about "This routine must be called at VMM interrupt
527 * level", I thought it would be safest to disable interrupts while
528 * looking at the software page fault table. */
530 /* convert vm handle into index into array: I think that stoinio is
531 * always zero... Look into this XXX */
532 #define VMHASH(handle) ( \
533 ( ((handle) & ~vmker.stoinio) \
534 ^ ((((handle) & ~vmker.stoinio) & vmker.stoimask) << vmker.stoihash) \
538 unsigned int pagef, pri, index, next;
540 index = VMHASH(avc->segid);
541 if (scb_valid(index)) {	/* could almost be an ASSERT */
542 pri = disable_ints();
/* NOTE(review): pagef is unsigned, so "pagef >= 0" is always true -- the
 * loop can only terminate via an interior break/return (not visible in
 * this excerpt).  Worth verifying in the full file. */
544 for (pagef = scb_sidlist(index); pagef >= 0; pagef = next) {
545 next = pft_sidfwd(pagef);
546 if (pft_modbit(pagef)) {	/* has page frame been modified? */
556 #endif /* AFS_AIX32_ENV */
558 #if defined (AFS_SUN_ENV)
/* Sun: walk the vnode's page list looking for a dirty page. */
559 if (avc->states & CMAPPED) {
561 for (pg = avc->v.v_s.v_Pages; pg; pg = pg->p_vpnext) {
574 * Solaris osi_ReleaseVM should not drop and re-obtain the vcache entry lock.
575 * This leads to bad races when osi_ReleaseVM() is called from
576 * afs_InvalidateAllSegments().
578 * We can do this because Solaris osi_VM_Truncate() doesn't care whether the
579 * vcache entry lock is held or not.
581 * For other platforms, in some cases osi_VM_Truncate() doesn't care, but
582 * there may be cases where it does care. If so, it would be good to fix
583 * them so they don't care. Until then, we assume the worst.
585 * Locking: the vcache entry lock is held. It is dropped and re-obtained.
/* NOTE(review): fragment -- the #ifdef AFS_SUN5_ENV / #else split that
 * selects between the two osi_VM_Truncate call sites is missing from
 * this excerpt. */
588 osi_ReleaseVM(struct vcache *avc, struct AFS_UCRED *acred)
/* Solaris path: truncate while still holding the lock (safe there). */
592 osi_VM_Truncate(avc, 0, acred);
/* Other platforms: drop the lock around the truncate, then re-acquire. */
595 ReleaseWriteLock(&avc->lock);
597 osi_VM_Truncate(avc, 0, acred);
599 ObtainWriteLock(&avc->lock, 80);
/* shutdown_osi fragment: on a cold shutdown the flush-text lock is
 * reinitialized so a subsequent module load starts clean.  (Signature and
 * closing braces missing from this excerpt.) */
607 AFS_STATCNT(shutdown_osi);
608 if (afs_cold_shutdown) {
609 LOCK_INIT(&afs_ftf, "afs_ftf");
/* afs_osi_suser -- does this credential carry superuser privilege?
 * Solaris delegates to afs_suser(); other platform arms are missing from
 * this excerpt. */
615 afs_osi_suser(void *credp)
617 #if defined(AFS_SUN5_ENV)
618 return afs_suser(credp);
627 /* afs_osi_TraverseProcTable() - Walk through the systems process
628 * table, calling afs_GCPAGs_perproc_func() for each process.
/* Solaris variant: walk the practive linked list.  NOTE(review):
 * fragment -- the prp declaration and any locking around practive are
 * missing from this excerpt; confirm the list is traversed safely. */
631 #if defined(AFS_SUN5_ENV)
633 afs_osi_TraverseProcTable(void)
636 for (prp = practive; prp != NULL; prp = prp->p_next) {
637 afs_GCPAGs_perproc_func(prp);
642 #if defined(AFS_HPUX_ENV)
645 * NOTE: h/proc_private.h gives the process table locking rules
646 * It indicates that access to p_cred must be protected by
648 * mp_mtproc_unlock(p);
650 * The code in sys/pm_prot.c uses pcred_lock() to protect access to
651 * the process creds, and uses mp_mtproc_lock() only for audit-related
652 * changes. To be safe, we use both.
/* HP-UX variant.  NOTE(review): fragment -- declarations of p/endchain
 * and several loop-interior lines are missing from this excerpt. */
656 afs_osi_TraverseProcTable(void)
/* Both spinlocks are held for the whole scan (see comment below). */
661 MP_SPINLOCK(activeproc_lock);
662 MP_SPINLOCK(sched_lock);
666 * Instead of iterating through all of proc[], traverse only
667 * the list of active processes. As an example of this,
668 * see foreach_process() in sys/vm_sched.c.
670 * We hold the locks for the entire scan in order to get a
671 * consistent view of the current set of creds.
/* p_fandx links the active-process chain; index 0 terminates it. */
674 for (p = proc; endchain == 0; p = &proc[p->p_fandx]) {
675 if (p->p_fandx == 0) {
683 afs_GCPAGs_perproc_func(p);
688 MP_SPINUNLOCK(sched_lock);
689 MP_SPINUNLOCK(activeproc_lock);
693 #if defined(AFS_SGI_ENV)
/* SGI variant: procscan() calls SGI_ProcScanFunc per process; the
 * perproc function pointer is smuggled through the arg parameter. */
696 /* TODO: Fix this later. */
698 SGI_ProcScanFunc(void *p, void *arg, int mode)
702 #else /* AFS_SGI65_ENV */
704 SGI_ProcScanFunc(proc_t * p, void *arg, int mode)
706 afs_int32(*perproc_func) (struct proc *) = arg;
708 /* we pass in the function pointer for arg,
709 * mode ==0 for startup call, ==1 for each valid proc,
710 * and ==2 for terminate call.
713 code = perproc_func(p);
717 #endif /* AFS_SGI65_ENV */
720 afs_osi_TraverseProcTable(void)
722 procscan(SGI_ProcScanFunc, afs_GCPAGs_perproc_func);
724 #endif /* AFS_SGI_ENV */
726 #if defined(AFS_AIX_ENV)
728 #define max_proc v.ve_proc
/* AIX variant.  NOTE(review): fragment -- declarations of p/i, the
 * afs_gcpags_procsize assignment, the #ifdef AFS_AIX51_ENV split around
 * the pv_stat vs p_stat checks, and the continue statements are missing
 * from this excerpt. */
731 afs_osi_TraverseProcTable(void)
737 * For binary compatibility, on AIX we need to be careful to use the
738 * proper size of a struct proc, even if it is different from what
739 * we were compiled with.
741 if (!afs_gcpags_procsize)
744 #ifndef AFS_AIX51_ENV
745 simple_lock(&proc_tbl_lock);
/* Step through the table by the runtime proc size, not sizeof(struct proc). */
747 for (p = (struct proc *)v.vb_proc, i = 0; p < max_proc;
748 p = (struct proc *)((char *)p + afs_gcpags_procsize), i++) {
751 if (p->p_pvprocp->pv_stat == SNONE)
753 if (p->p_pvprocp->pv_stat == SIDL)
755 if (p->p_pvprocp->pv_stat == SEXIT)
758 if (p->p_stat == SNONE)
760 if (p->p_stat == SIDL)
762 if (p->p_stat == SEXIT)
/* Sanity checks: if the table layout looks wrong, disable GCPAGs rather
 * than walk bad memory. */
768 if (PROCMASK(p->p_pid) != i) {
769 afs_gcpags = AFS_GCPAGS_EPIDCHECK;
775 if ((p->p_nice < P_NICE_MIN) || (P_NICE_MAX < p->p_nice)) {
776 afs_gcpags = AFS_GCPAGS_ENICECHECK;
780 afs_GCPAGs_perproc_func(p);
782 #ifndef AFS_AIX51_ENV
783 simple_unlock(&proc_tbl_lock);
788 #if defined(AFS_OSF_ENV)
/* Tru64/OSF variant: scan the pid table, skipping empty slots. */
790 afs_osi_TraverseProcTable(void)
792 struct pid_entry *pe;
794 #define pidNPID (pidtab + npid)
799 for (pe = pidtab; pe < pidNPID; ++pe) {
800 if (pe->pe_proc != PROC_NULL)
801 afs_GCPAGs_perproc_func(pe->pe_proc);
807 #if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
/* Darwin/FreeBSD variant: walk allproc, skipping embryonic (SIDL),
 * zombie, and system processes.  (Fragment: the continue statements and
 * any allproc locking are missing from this excerpt.) */
809 afs_osi_TraverseProcTable(void)
812 LIST_FOREACH(p, &allproc, p_list) {
813 if (p->p_stat == SIDL)
815 if (p->p_stat == SZOMB)
817 if (p->p_flag & P_SYSTEM)
819 afs_GCPAGs_perproc_func(p);
824 #if defined(AFS_LINUX22_ENV)
/* Linux variant: iterate the task list under tasklist_lock when the
 * kernel exports it, using whichever iteration macro this kernel
 * provides; zombies are skipped. */
826 afs_osi_TraverseProcTable()
828 struct task_struct *p;
830 #ifdef EXPORTED_TASKLIST_LOCK
831 read_lock(&tasklist_lock);
833 #ifdef DEFINED_FOR_EACH_PROCESS
834 for_each_process(p) if (p->pid) {
835 if (p->state & TASK_ZOMBIE)
837 afs_GCPAGs_perproc_func(p);
840 for_each_task(p) if (p->pid) {
841 if (p->state & TASK_ZOMBIE)
843 afs_GCPAGs_perproc_func(p);
846 #ifdef EXPORTED_TASKLIST_LOCK
847 read_unlock(&tasklist_lock);
852 /* return a pointer (sometimes a static copy ) to the cred for a
854 * subsequent calls may overwrite the previously returned value.
/* NOTE(review): every variant below is fragmentary -- returns, brace
 * closes, and several state checks are missing from this excerpt.  The
 * variants that copy into a file-scope `static struct AFS_UCRED` are not
 * reentrant, exactly as the header comment warns. */
857 #if defined(AFS_SGI65_ENV)
858 const struct AFS_UCRED *
859 afs_osi_proc2cred(AFS_PROC * p)
863 #elif defined(AFS_HPUX_ENV)
864 const struct AFS_UCRED *
865 afs_osi_proc2cred(AFS_PROC * p)
871 * Cannot use afs_warnuser() here, as the code path
872 * eventually wants to grab sched_lock, which is
878 #elif defined(AFS_AIX_ENV)
880 /* GLOBAL DECLARATIONS */
883 * LOCKS: the caller must do
884 * simple_lock(&proc_tbl_lock);
885 * simple_unlock(&proc_tbl_lock);
886 * around calls to this function.
/* AIX variant: the target process's user area lives in its own address
 * space; it is attached via vm_att/xmattach, the cred copied out into a
 * static buffer, then detached (see the step-by-step comment below). */
889 const struct AFS_UCRED *
890 afs_osi_proc2cred(AFS_PROC * pproc)
892 struct AFS_UCRED *pcred = 0;
895 * pointer to process user structure valid in *our*
898 * The user structure for a process is stored in the user
899 * address space (as distinct from the kernel address
900 * space), and so to refer to the user structure of a
901 * different process we must employ special measures.
903 * I followed the example used in the AIX getproc() system
904 * call in bos/kernel/proc/getproc.c
906 struct user *xmem_userp;
908 struct xmem dp;		/* ptr to xmem descriptor */
909 int xm;			/* xmem result */
916 * The process private segment in which the user
917 * area is located may disappear. We need to increment
918 * its use count. Therefore we
919 * - get the proc_tbl_lock to hold the segment.
920 * - get the p_lock to lockout vm_cleardata.
921 * - vm_att to load the segment register (no check)
922 * - xmattach to bump its use count.
923 * - release the p_lock.
924 * - release the proc_tbl_lock.
925 * - do whatever we need.
926 * - xmdetach to decrement the use count.
927 * - vm_det to free the segment register (no check)
932 /* simple_lock(&proc_tbl_lock); */
934 if (pproc->p_adspace != vm_handle(NULLSEGID, (int32long64_t) 0)) {
936 if (pproc->p_adspace != NULLSEGVAL) {
940 simple_lock(&pproc->p_pvprocp->pv_lock);
942 simple_lock(&pproc->p_lock);
945 if (pproc->p_threadcount &&
947 pproc->p_pvprocp->pv_threadlist) {
949 pproc->p_threadlist) {
953 * arbitrarily pick the first thread in pproc
955 struct thread *pproc_thread =
957 pproc->p_pvprocp->pv_threadlist;
963 * location of 'struct user' in pproc's
966 struct user *pproc_userp = pproc_thread->t_userp;
969 * create a pointer valid in my own address space
972 xmem_userp = (struct user *)vm_att(pproc->p_adspace, pproc_userp);
974 dp.aspace_id = XMEM_INVAL;
975 xm = xmattach(xmem_userp, sizeof(*xmem_userp), &dp, SYS_ADSPACE);
979 simple_unlock(&pproc->p_pvprocp->pv_lock);
981 simple_unlock(&pproc->p_lock);
984 /* simple_unlock(&proc_tbl_lock); */
985 if (xm == XMEM_SUCC) {
987 static struct AFS_UCRED cred;
990 * What locking should we use to protect access to the user
991 * area? If needed also change the code in AIX/osi_groups.c.
994 /* copy cred to local address space */
995 cred = *xmem_userp->U_cred;
1001 vm_det((void *)xmem_userp);
/* OSF variant: only hand back the cred for runnable/sleeping/stopped
 * processes. */
1007 #elif defined(AFS_OSF_ENV)
1008 const struct AFS_UCRED *
1009 afs_osi_proc2cred(AFS_PROC * pr)
1011 struct AFS_UCRED *rv = NULL;
1017 if ((pr->p_stat == SSLEEP) || (pr->p_stat == SRUN)
1018 || (pr->p_stat == SSTOP))
/* Darwin/FreeBSD variant: copy uid and groups into a static cred. */
1023 #elif defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
1024 const struct AFS_UCRED *
1025 afs_osi_proc2cred(AFS_PROC * pr)
1027 struct AFS_UCRED *rv = NULL;
1028 static struct AFS_UCRED cr;
1034 if ((pr->p_stat == SSLEEP) || (pr->p_stat == SRUN)
1035 || (pr->p_stat == SSTOP)) {
1038 cr.cr_uid = pr->p_cred->pc_ucred->cr_uid;
1039 cr.cr_ngroups = pr->p_cred->pc_ucred->cr_ngroups;
1040 memcpy(cr.cr_groups, pr->p_cred->pc_ucred->cr_groups,
1041 NGROUPS * sizeof(gid_t));
/* Linux variant: snapshot uid and group membership from the task; on 2.6
 * the group_info refcount is bumped before it is stored in the static cred. */
1048 #elif defined(AFS_LINUX22_ENV)
1049 const struct AFS_UCRED *
1050 afs_osi_proc2cred(AFS_PROC * pr)
1052 struct AFS_UCRED *rv = NULL;
1053 static struct AFS_UCRED cr;
1059 if ((pr->state == TASK_RUNNING) || (pr->state == TASK_INTERRUPTIBLE)
1060 || (pr->state == TASK_UNINTERRUPTIBLE)
1061 || (pr->state == TASK_STOPPED)) {
1063 cr.cr_uid = pr->uid;
1064 #if defined(AFS_LINUX26_ENV)
1065 get_group_info(pr->group_info);
1066 cr.cr_group_info = pr->group_info;
1068 cr.cr_ngroups = pr->ngroups;
1069 memcpy(cr.cr_groups, pr->groups, NGROUPS * sizeof(gid_t));
/* Fallback for all other platforms: no cred available, return NULL. */
1077 const struct AFS_UCRED *
1078 afs_osi_proc2cred(AFS_PROC * pr)
1080 struct AFS_UCRED *rv = NULL;
1091 #endif /* AFS_GCPAGS */