2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "../afs/param.h"
15 #include "../afs/sysincludes.h" /* Standard vendor system headers */
16 #include "../afs/afsincludes.h" /* Afs-based standard headers */
17 #include "../afs/afs_stats.h" /* afs statistics */
19 #include <sys/adspace.h> /* for vm_att(), vm_det() */
/* NOTE(review): this file appears to be a lossy excerpt of OpenAFS
 * afs_osi.c; the leading integers on each line are residue of an
 * extraction pass and many intervening lines are missing. */
/* Sentinel returned for zero-length allocations so that a NULL result
 * from afs_osi_Alloc() always means "out of memory". */
22 static char memZero; /* address of 0 bytes for kmem_alloc */
/* Nonzero once afs_osi_cred has been built (set in osi_Init below). */
23 extern int afs_osicred_initialized;
29 /* osi_Init -- do once per kernel installation initialization.
30 * -- On Solaris this is called from modload initialization.
31 * -- On AIX called from afs_config.
32 * -- On HP called from afsc_link.
33 * -- On SGI called from afs_init. */
/* Event lock -- presumably guards the osi sleep/wakeup event table;
 * TODO confirm against the per-platform osi_sleep code. */
36 lock_t afs_event_lock;
/* Body fragment of osi_Init(): runs once (guarded by a static counter),
 * sets up the AFS global lock for the current platform, and creates the
 * anonymous credential used for cache-manager I/O.  The function header
 * and several closing lines are elided from this excerpt. */
46 if (once++ > 0) /* just in case */
48 #if defined(AFS_HPUX_ENV)
50 #else /* AFS_HPUX_ENV */
51 #if defined(AFS_GLOBAL_SUNLOCK)
/* One branch per platform: each initializes afs_global_lock with the
 * native primitive (SGI mutex, OSF simple lock, BSD lockmgr, AIX
 * pinned simple lock, or the generic mutex below). */
52 #if defined(AFS_SGI62_ENV)
53 mutex_init(&afs_global_lock, MUTEX_DEFAULT, "afs_global_lock");
54 #elif defined(AFS_OSF_ENV)
55 usimple_lock_init(&afs_global_lock);
56 afs_global_owner = (thread_t)0;
57 #elif defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
58 lockinit(&afs_global_lock, PLOCK, "afs global lock", 0, 0);
59 afs_global_owner = (thread_t)0;
60 #elif defined(AFS_AIX41_ENV)
61 lock_alloc((void*)&afs_global_lock, LOCK_ALLOC_PIN, 1, 1);
62 simple_lock_init((void *)&afs_global_lock);
64 #ifndef AFS_LINUX22_ENV
65 /* Linux initialization in osi directory. Should move the others. */
66 mutex_init(&afs_global_lock, "afs_global_lock", MUTEX_DEFAULT, NULL);
69 /* afs_rxglobal_lock is initialized in rx_Init. */
71 #endif /* AFS_HPUX_ENV */
/* Build the zeroed "anonymous" credential once and pin it with crhold()
 * so it can never be freed out from under the cache manager. */
73 if ( !afs_osicred_initialized )
75 memset((char *)&afs_osi_cred, 0, sizeof(struct AFS_UCRED));
76 crhold(&afs_osi_cred); /* don't let it evaporate */
77 afs_osicred_initialized = 1;
80 osi_flid.fl_pid = osi_flid.fl_sysid = 0;
/* osi_Active -- return 1 iff the vcache entry avc is still in use:
 * open by some process, or (platform-dependent) memory-mapped / has an
 * active text object.  The K&R function header line is elided from this
 * excerpt.
 * Fix: the original #if tested "(AFS_LINUX20_ENV)" without defined(),
 * which evaluates the macro's value rather than its existence and is a
 * preprocessor error when the macro is defined empty. */
85 register struct vcache *avc; {
86 AFS_STATCNT(osi_Active);
87 #if defined(AFS_SUN_ENV) || defined(AFS_AIX_ENV) || defined(AFS_OSF_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_LINUX20_ENV) || defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
88 if ((avc->opens > 0) || (avc->states & CMAPPED)) return 1; /* XXX: Warning, verify this XXX */
90 #if defined(AFS_MACH_ENV)
91 if (avc->opens > 0 || ((avc->v.v_flag & VTEXT) && !inode_uncache_try(avc))) return 1;
93 #if defined(AFS_SGI_ENV)
94 if ((avc->opens > 0) || AFS_VN_MAPPED((struct vnode *)avc))
97 if (avc->opens > 0 || (avc->v.v_flag & VTEXT)) return(1);
99 #endif /* AFS_MACH_ENV */
104 /* this call, unlike osi_FlushText, is supposed to discard caches that may
105 contain invalid information if a file is written remotely, but that may
106 contain valid information that needs to be written back if the file is
107 being written locally. It doesn't subsume osi_FlushText, since the latter
108 function may be needed to flush caches that are invalidated by local writes.
110 avc->pvnLock is already held, avc->lock is guaranteed not to be held (by
 * NOTE(review): osi_FlushPages below invalidates the VM pages for avc
 * unless this client is itself writing the file; avc->mapDV tracks the
 * last data version whose pages were discarded.  Several lines of the
 * original, including the early "return;" statements, are elided.
113 void osi_FlushPages(avc, credp)
114 register struct vcache *avc;
115 struct AFS_UCRED *credp;
118 ObtainReadLock(&avc->lock);
119 /* If we've already purged this version, or if we're the ones
120 writing this version, don't flush it (could lose the
121 data we're writing). */
122 if ((hcmp((avc->m.DataVersion), (avc->mapDV)) <= 0) ||
123 ((avc->execsOrWriters > 0) && afs_DirtyPages(avc))) {
124 ReleaseReadLock(&avc->lock);
/* Quick test failed under the read lock; repeat the identical test
 * under the write lock since state can change while no lock is held. */
127 ReleaseReadLock(&avc->lock);
128 ObtainWriteLock(&avc->lock,10);
130 if ((hcmp((avc->m.DataVersion), (avc->mapDV)) <= 0) ||
131 ((avc->execsOrWriters > 0) && afs_DirtyPages(avc))) {
132 ReleaseWriteLock(&avc->lock);
/* First flush ever for this vnode: just record the current version. */
135 if (hiszero(avc->mapDV)) {
136 hset(avc->mapDV, avc->m.DataVersion);
137 ReleaseWriteLock(&avc->lock);
141 AFS_STATCNT(osi_FlushPages);
142 hset(origDV, avc->m.DataVersion);
143 afs_Trace3(afs_iclSetp, CM_TRACE_FLUSHPAGES, ICL_TYPE_POINTER, avc,
144 ICL_TYPE_INT32, origDV.low, ICL_TYPE_INT32, avc->m.Length);
/* Drop the lock around the platform-specific page flush; it may sleep
 * in the VM system. */
146 ReleaseWriteLock(&avc->lock);
148 osi_VM_FlushPages(avc, credp);
150 ObtainWriteLock(&avc->lock,88);
152 /* do this last, and to original version, since stores may occur
153 while executing above PUTPAGE call */
154 hset(avc->mapDV, origDV);
155 ReleaseWriteLock(&avc->lock);
158 afs_lock_t afs_ftf; /* flush text lock */
162 /* This call is supposed to flush all caches that might be invalidated
163 * by either a local write operation or a write operation done on
164 * another client. This call may be called repeatedly on the same
165 * version of a file, even while a file is being written, so it
166 * shouldn't do anything that would discard newly written data before
167 * it is written to the file system. */
/* osi_FlushText_really -- flush text/VM caches for vp up to its current
 * data version; vp->flushDV records the version through which flushing
 * is complete, so repeated calls on the same version return early.
 * Fragment: the GFS dispatch and HPUX body are heavily elided. */
169 void osi_FlushText_really(vp)
170 register struct vcache *vp; {
171 afs_hyper_t fdv; /* version before which we'll flush */
173 AFS_STATCNT(osi_FlushText);
174 /* see if we've already flushed this data version */
175 if (hcmp(vp->m.DataVersion, vp->flushDV) <= 0) return;
179 void afs_gfs_FlushText();
180 afs_gfs_FlushText(vp);
/* Serialize flushers on afs_ftf; capture the version we flush up to
 * before doing any work so concurrent writers are not lost. */
185 MObtainWriteLock(&afs_ftf,317);
186 hset(fdv, vp->m.DataVersion);
188 /* why this disgusting code below?
189 * xuntext, called by xrele, doesn't notice when it is called
190 * with a freed text object. Sun continually calls xrele or xuntext
191 * without any locking, as long as VTEXT is set on the
192 * corresponding vnode.
193 * But, if the text object is locked when you check the VTEXT
194 * flag, several processes can wait in xuntext, waiting for the
195 * text lock; when the second one finally enters xuntext's
196 * critical region, the text object is already free, but the check
197 * was already done by xuntext's caller.
198 * Even worse, it turns out that xalloc locks the text object
199 * before reading or stating a file via the vnode layer. Thus, we
200 * could end up in getdcache, being asked to bring in a new
201 * version of a file, but the corresponding text object could be
202 * locked. We can't flush the text object without causing
203 * deadlock, so now we just don't try to lock the text object
204 * unless it is guaranteed to work. And we try to flush the text
205 * when we need to a bit more often at the vnode layer. Sun
206 * really blew the vm-cache flushing interface.
209 #if defined (AFS_HPUX_ENV)
210 if (vp->v.v_flag & VTEXT) {
213 if (vp->v.v_flag & VTEXT) { /* still has a text object? */
214 MReleaseWriteLock(&afs_ftf);
220 /* next do the stuff that need not check for deadlock problems */
223 /* finally, record that we've done it */
224 hset(vp->flushDV, fdv);
225 MReleaseWriteLock(&afs_ftf);
227 #endif /* AFS_DEC_ENV */
231 /* I don't really like using xinval() here, because it kills processes
232 * a bit aggressively. Previous incarnations of this functionality
233 * used to use xrele() instead of xinval, and didn't invoke
234 * cacheinval(). But they would panic. So it might be worth looking
235 * into some middle ground...
 * NOTE(review): afs_gfs_FlushText below is the GFS-specific text-flush
 * helper; its opening lines and several body lines are elided here.
238 afs_gfs_FlushText(vp)
239 register struct vcache *vp; {
240 afs_hyper_t fdv; /* version before which we'll flush */
241 register struct text *xp;
244 MObtainWriteLock(&afs_ftf,318);
245 hset(fdv, vp->m.DataVersion);
249 /* this happens frequently after cores are created. */
250 MReleaseWriteLock(&afs_ftf);
/* If the gnode still has a text object whose lock is held, give up
 * immediately rather than risk the xuntext deadlock. */
254 if (gp->g_flag & GTEXT) {
256 xp = (struct text *) gp->g_textp ;
257 /* if text object is locked, give up */
258 if (xp && (xp->x_flag & XLOCK)) {
259 MReleaseWriteLock(&afs_ftf);
263 else xp = (struct text *) 0;
265 if (gp->g_flag & GTEXT) {/* still has a text object? */
270 /* next do the stuff that need not check for deadlock problems */
271 /* maybe xinval(gp); here instead of above */
274 /* finally, record that we've done it */
275 hset(vp->flushDV, fdv);
277 MReleaseWriteLock(&afs_ftf);
279 #endif /* AFS_DEC_ENV */
281 #endif /* AFS_TEXT_ENV */
283 /* mask signals in afsds */
/* afs_osi_MaskSignals -- block every signal in the calling process.
 * Used by the AFS background daemons so they cannot be interrupted or
 * killed by signals.  Only implemented for Linux 2.2 in this excerpt.
 * Fix: the original text contained the mojibake sequence "¤t"
 * (an HTML "&curren;" mis-decoding of "&current"); the three kernel
 * calls below are restored to take "&current->...". */
284 void afs_osi_MaskSignals(){
285 #ifdef AFS_LINUX22_ENV
286 spin_lock_irq(&current->sigmask_lock);
287 sigfillset(&current->blocked);
288 recalc_sigpending(current);
289 spin_unlock_irq(&current->sigmask_lock);
/* afs_osi_Invisible -- make the calling daemon process as invisible as
 * the platform allows: mask signals (Linux) and/or mark it as a system
 * process.  Several #else/#endif lines are elided from this excerpt. */
293 /* procedure for making our processes as invisible as we can */
294 void afs_osi_Invisible() {
295 #ifdef AFS_LINUX22_ENV
296 afs_osi_MaskSignals();
299 u.u_procp->p_type |= SSYS;
302 curproc->p_flag |= SSYS;
305 set_system_proc(u.u_procp);
307 #if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
308 /* maybe call init_process instead? */
309 current_proc()->p_flag |= P_SYSTEM;
311 #if defined(AFS_SGI_ENV)
313 #endif /* AFS_SGI_ENV */
315 AFS_STATCNT(osi_Invisible);
319 #ifndef AFS_LINUX20_ENV /* Linux version in osi_misc.c */
320 /* set the real time */
/* osi_SetTime fragment: sets the system clock from an AFS timeval,
 * one platform branch at a time (AIX ksettimer, Sun/SGI stime, BSD
 * boottime adjustment).  The function header and most of the #if
 * structure are elided from this excerpt. */
322 register osi_timeval_t *atv; {
325 struct timestruc_t t;
327 t.tv_sec = atv->tv_sec;
328 t.tv_nsec = atv->tv_usec * 1000;
329 ksettimer(&t); /* Was -> settimer(TIMEOFDAY, &t); */
336 * To get more than second resolution we can use adjtime. The problem
337 * is that the usecs from the server are wrong (by now) so it isn't
338 * worth complicating the following code.
343 extern int stime(struct stimea *time, rval_t *rvp);
345 sta.time = atv->tv_sec;
349 #if defined(AFS_SGI_ENV)
353 extern int stime(struct stimea *time);
356 sta.time = atv->tv_sec;
360 #if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
365 /* stolen from kern_time.c */
367 boottime.tv_sec += atv->tv_sec - time.tv_sec;
/* Install the new time at raised interrupt priority so the clock
 * update is atomic with respect to the clock interrupt. */
373 t.tv_sec = atv->tv_sec;
374 t.tv_usec = atv->tv_usec;
375 s = spl7(); time = t; (void) splx(s);
381 s = splclock(); time = *atv; (void) splx(s);
386 logtchg(atv->tv_sec);
388 #endif /* AFS_DARWIN_ENV || AFS_FBSD_ENV */
389 #endif /* AFS_SGI_ENV */
390 #endif /* AFS_SUN55_ENV */
391 #endif /* AFS_SUN5_ENV */
392 #endif /* AFS_AIX32_ENV */
393 AFS_STATCNT(osi_SetTime);
396 #endif /* AFS_LINUX20_ENV */
/* afs_osi_Alloc -- allocate x bytes of kernel memory, panicking on
 * failure (see osi_Panic below) so callers never see NULL.  Zero-byte
 * requests return the shared &memZero sentinel, matched by the check
 * in afs_osi_Free.  Statistics track outstanding allocations. */
399 void *afs_osi_Alloc(size_t x)
401 register struct osimem *tm = NULL;
404 AFS_STATCNT(osi_Alloc);
405 /* 0-length allocs may return NULL ptr from AFS_KALLOC, so we special-case
406 things so that NULL returned iff an error occurred */
407 if (x == 0) return &memZero;
409 AFS_STATS(afs_stats_cmperf.OutStandingAllocs++);
410 AFS_STATS(afs_stats_cmperf.OutStandingMemUsage += x);
411 #ifdef AFS_LINUX20_ENV
412 return osi_linux_alloc(x);
/* NOTE(review): "size" is presumably a local set from x on a line
 * elided from this excerpt -- confirm against the full source. */
415 tm = (struct osimem *) AFS_KALLOC(size);
418 osi_Panic("osi_Alloc: Couldn't allocate %d bytes; out of memory!\n",
425 #if defined(AFS_SUN_ENV) || defined(AFS_SGI_ENV)
/* afs_osi_Alloc_NoSleep -- like afs_osi_Alloc, but uses the
 * non-blocking allocator (Sun/SGI only), so it may fail under memory
 * pressure instead of sleeping. */
427 void *afs_osi_Alloc_NoSleep(size_t x)
429 register struct osimem *tm;
432 AFS_STATCNT(osi_Alloc);
433 /* 0-length allocs may return NULL ptr from AFS_KALLOC, so we special-case
434 things so that NULL returned iff an error occurred */
435 if (x == 0) return &memZero;
438 AFS_STATS(afs_stats_cmperf.OutStandingAllocs++);
439 AFS_STATS(afs_stats_cmperf.OutStandingMemUsage += x);
/* NOTE(review): "size" presumably set from x on an elided line, as in
 * afs_osi_Alloc above -- confirm against the full source. */
440 tm = (struct osimem *) AFS_KALLOC_NOSLEEP(size);
444 #endif /* SUN || SGI */
/* afs_osi_Free -- release memory obtained from afs_osi_Alloc; asize
 * must match the original request size.  The &memZero sentinel used
 * for zero-length allocations is recognized and ignored. */
446 void afs_osi_Free(void *x, size_t asize)
448 register struct osimem *tm, **lm, *um;
450 AFS_STATCNT(osi_Free);
451 if (x == &memZero) return; /* check for putting memZero back */
453 AFS_STATS(afs_stats_cmperf.OutStandingAllocs--);
454 AFS_STATS(afs_stats_cmperf.OutStandingMemUsage -= asize);
455 #ifdef AFS_LINUX20_ENV
458 AFS_KFREE((struct osimem *)x, asize);
463 /* ? is it moderately likely that there are dirty VM pages associated with
466 * Prereqs: avc must be write-locked
468 * System Dependencies: - *must* support each type of system for which
469 * memory mapped files are supported, even if all
470 * it does is return TRUE;
472 * NB: this routine should err on the side of caution for ProcessFS to work
473 * correctly (or at least, not to introduce worse bugs than already exist)
482 if (avc->execsOrWriters <= 0)
483 return 0; /* can't be many dirty pages here, I guess */
485 #if defined (AFS_AIX32_ENV)
/* AIX: walk the software page-fault table for avc's VM segment with
 * interrupts disabled, checking each frame's modified bit. */
487 /* because of the level of hardware involvment with VM and all the
488 * warnings about "This routine must be called at VMM interrupt
489 * level", I thought it would be safest to disable interrupts while
490 * looking at the software page fault table. */
492 /* convert vm handle into index into array: I think that stoinio is
493 * always zero... Look into this XXX */
494 #define VMHASH(handle) ( \
495 ( ((handle) & ~vmker.stoinio) \
496 ^ ((((handle) & ~vmker.stoinio) & vmker.stoimask) << vmker.stoihash) \
500 unsigned int pagef, pri, index, next;
501 extern struct vmkerdata vmker;
503 index = VMHASH(avc->vmh);
504 if (scb_valid(index)) { /* could almost be an ASSERT */
506 pri = disable_ints();
/* NOTE(review): pagef is unsigned, so "pagef >= 0" below is always
 * true; the loop presumably terminates via an elided end-of-list
 * sentinel or break -- confirm against the full source. */
507 for (pagef = scb_sidlist(index); pagef >= 0; pagef = next) {
508 next = pft_sidfwd(pagef);
509 if (pft_modbit(pagef)) { /* has page frame been modified? */
519 #endif /* AFS_AIX32_ENV */
521 #if defined (AFS_SUN_ENV)
/* Sun: if the file is mapped, scan its page list (body elided). */
522 if (avc->states & CMAPPED) {
524 for (pg = avc->v.v_s.v_Pages ; pg ; pg = pg->p_vpnext) {
537 * Solaris osi_ReleaseVM should not drop and re-obtain the vcache entry lock.
538 * This leads to bad races when osi_ReleaseVM() is called from
539 * afs_InvalidateAllSegments().
541 * We can do this because Solaris osi_VM_Truncate() doesn't care whether the
542 * vcache entry lock is held or not.
544 * For other platforms, in some cases osi_VM_Truncate() doesn't care, but
545 * there may be cases where it does care. If so, it would be good to fix
546 * them so they don't care. Until then, we assume the worst.
548 * Locking: the vcache entry lock is held. It is dropped and re-obtained.
/* osi_ReleaseVM -- discard all VM pages for avc by truncating its VM
 * object to length 0.  On Solaris this runs with the vcache lock held;
 * elsewhere the lock is dropped around the truncate (see note above). */
551 osi_ReleaseVM(avc, acred)
553 struct AFS_UCRED *acred;
557 osi_VM_Truncate(avc, 0, acred);
560 ReleaseWriteLock(&avc->lock);
562 osi_VM_Truncate(avc, 0, acred);
564 ObtainWriteLock(&avc->lock, 80);
/* Fragments of shutdown_osi() -- re-initializes this module's locks on
 * a cold shutdown -- and of afs_osi_suser(), which defers the
 * superuser check to afs_suser().  Both headers are elided here. */
571 extern int afs_cold_shutdown;
573 AFS_STATCNT(shutdown_osi);
574 if (afs_cold_shutdown) {
575 LOCK_INIT(&afs_ftf, "afs_ftf");
583 return afs_suser(credp);
591 /* afs_osi_TraverseProcTable() - Walk through the systems process
592 * table, calling afs_GCPAGs_perproc_func() for each process.
 * NOTE(review): Solaris variant below -- follows the practive linked
 * list; the "{" and local-declaration lines are elided here.
595 #if defined(AFS_SUN5_ENV)
596 void afs_osi_TraverseProcTable()
599 for (prp = practive; prp != NULL; prp = prp->p_next) {
600 afs_GCPAGs_perproc_func(prp);
605 #if defined(AFS_HPUX_ENV)
608 * NOTE: h/proc_private.h gives the process table locking rules
609 * It indicates that access to p_cred must be protected by
611 * mp_mtproc_unlock(p);
613 * The code in sys/pm_prot.c uses pcred_lock() to protect access to
614 * the process creds, and uses mp_mtproc_lock() only for audit-related
615 * changes. To be safe, we use both.
 * NOTE(review): HP-UX variant -- walks the active-process chain with
 * both activeproc_lock and sched_lock held for the whole scan so the
 * set of creds seen is consistent.
618 void afs_osi_TraverseProcTable()
623 MP_SPINLOCK(activeproc_lock);
624 MP_SPINLOCK(sched_lock);
628 * Instead of iterating through all of proc[], traverse only
629 * the list of active processes. As an example of this,
630 * see foreach_process() in sys/vm_sched.c.
632 * We hold the locks for the entire scan in order to get a
633 * consistent view of the current set of creds.
636 for(p = proc; endchain == 0; p = &proc[p->p_fandx]) {
637 if (p->p_fandx == 0) {
645 afs_GCPAGs_perproc_func(p);
650 MP_SPINUNLOCK(sched_lock);
651 MP_SPINUNLOCK(activeproc_lock);
655 #if defined(AFS_SGI_ENV)
658 /* TODO: Fix this later. */
659 static int SGI_ProcScanFunc(void *p, void *arg, int mode)
663 #else /* AFS_SGI65_ENV */
/* SGI: procscan() callback -- forwards each valid proc to the
 * per-process GC function passed in via arg (mode semantics are
 * described in the original comment below). */
664 static int SGI_ProcScanFunc(proc_t *p, void *arg, int mode)
666 afs_int32 (*perproc_func)(struct proc *) = arg;
668 /* we pass in the function pointer for arg,
669 * mode ==0 for startup call, ==1 for each valid proc,
670 * and ==2 for terminate call.
673 code = perproc_func(p);
677 #endif /* AFS_SGI65_ENV */
/* SGI traversal simply delegates to the kernel's procscan(). */
679 void afs_osi_TraverseProcTable()
681 procscan(SGI_ProcScanFunc, afs_GCPAGs_perproc_func);
685 #if defined(AFS_AIX_ENV)
/* AIX variant: walks the proc table by explicit byte stride
 * (afs_gcpags_procsize) for binary compatibility with differing
 * struct proc sizes; skips SNONE/SIDL/SEXIT slots and sanity-checks
 * the pid slot and nice range, disabling GCPAGS (afs_gcpags error
 * codes) on any inconsistency. */
686 void afs_osi_TraverseProcTable()
692 * For binary compatibility, on AIX we need to be careful to use the
693 * proper size of a struct proc, even if it is different from what
694 * we were compiled with.
696 if (!afs_gcpags_procsize)
699 simple_lock(&proc_tbl_lock);
700 for (p = (struct proc *)v.vb_proc, i = 0;
702 p = (struct proc *)((char *)p + afs_gcpags_procsize), i++) {
704 if (p->p_stat == SNONE)
706 if (p->p_stat == SIDL)
708 if (p->p_stat == SEXIT)
713 if (PROCMASK(p->p_pid) != i) {
714 afs_gcpags = AFS_GCPAGS_EPIDCHECK;
720 if ((p->p_nice < P_NICE_MIN) || (P_NICE_MAX < p->p_nice)) {
721 afs_gcpags = AFS_GCPAGS_ENICECHECK;
725 afs_GCPAGs_perproc_func(p);
727 simple_unlock(&proc_tbl_lock);
731 #if defined(AFS_OSF_ENV)
/* OSF/1 variant: scan the pid table, skipping empty entries. */
732 void afs_osi_TraverseProcTable()
734 struct pid_entry *pe;
736 extern struct pid_entry *pidtab;
738 #define pidNPID (pidtab + npid)
743 for (pe = pidtab; pe < pidNPID; ++pe) {
744 if (pe->pe_proc != PROC_NULL)
745 afs_GCPAGs_perproc_func(pe->pe_proc);
751 #if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
/* Darwin/FreeBSD variant: walk allproc, skipping SIDL and SZOMB
 * processes and processes flagged P_SYSTEM. */
752 void afs_osi_TraverseProcTable()
755 LIST_FOREACH(p, &allproc, p_list) {
756 if (p->p_stat == SIDL)
758 if (p->p_stat == SZOMB)
760 if (p->p_flag & P_SYSTEM)
762 afs_GCPAGs_perproc_func(p);
767 /* return a pointer (sometimes a static copy ) to the cred for a
769 * subsequent calls may overwrite the previously returned value.
 * NOTE(review): per-platform afs_osi_proc2cred() implementations
 * follow; most of their bodies are heavily elided in this excerpt.
772 #if defined(AFS_SGI65_ENV)
773 const struct AFS_UCRED *afs_osi_proc2cred(AFS_PROC *pr)
777 #elif defined(AFS_HPUX_ENV)
778 const struct AFS_UCRED *afs_osi_proc2cred(proc_t *p)
784 * Cannot use afs_warnuser() here, as the code path
785 * eventually wants to grab sched_lock, which is
791 #elif defined(AFS_AIX_ENV)
793 /* GLOBAL DECLARATIONS */
795 extern int xmattach(); /* fills out cross memory descriptor */
796 extern int xmdetach(); /* decrements reference count to segment */
799 * LOCKS: the caller must do
800 * simple_lock(&proc_tbl_lock);
801 * simple_unlock(&proc_tbl_lock);
802 * around calls to this function.
/* AIX afs_osi_proc2cred -- fetch the target process's credentials from
 * that process's own user area via the cross-memory services
 * (vm_att/xmattach), copying them into a static AFS_UCRED.  The
 * failure paths presumably leave pcred at its initial 0 -- confirm
 * against the full source (several lines are elided). */
805 const struct AFS_UCRED *afs_osi_proc2cred(AFS_PROC *pproc)
807 struct AFS_UCRED *pcred = 0;
810 * pointer to process user structure valid in *our*
813 * The user structure for a process is stored in the user
814 * address space (as distinct from the kernel address
815 * space), and so to refer to the user structure of a
816 * different process we must employ special measures.
818 * I followed the example used in the AIX getproc() system
819 * call in bos/kernel/proc/getproc.c
821 struct user *xmem_userp;
823 struct xmem dp; /* ptr to xmem descriptor */
824 int xm; /* xmem result */
831 * The process private segment in which the user
832 * area is located may disappear. We need to increment
833 * its use count. Therefore we
834 * - get the proc_tbl_lock to hold the segment.
835 * - get the p_lock to lockout vm_cleardata.
836 * - vm_att to load the segment register (no check)
837 * - xmattach to bump its use count.
838 * - release the p_lock.
839 * - release the proc_tbl_lock.
840 * - do whatever we need.
841 * - xmdetach to decrement the use count.
842 * - vm_det to free the segment register (no check)
847 /* simple_lock(&proc_tbl_lock); */
848 if (pproc->p_adspace != NULLSEGVAL) {
850 simple_lock(&pproc->p_lock);
852 if (pproc->p_threadcount &&
853 pproc->p_threadlist) {
856 * arbitrarily pick the first thread in pproc
858 struct thread *pproc_thread =
862 * location of 'struct user' in pproc's
865 struct user *pproc_userp =
866 pproc_thread->t_userp;
869 * create a pointer valid in my own address space
873 (struct user *)vm_att(pproc->p_adspace,
876 dp.aspace_id = XMEM_INVAL;
877 xm = xmattach(xmem_userp,
/* Locks are released before the cross-memory copy, per the recipe in
 * the comment earlier in this function. */
882 simple_unlock(&pproc->p_lock);
884 /* simple_unlock(&proc_tbl_lock); */
885 if (xm == XMEM_SUCC) {
887 static struct AFS_UCRED cred;
890 * What locking should we use to protect access to the user
891 * area? If needed also change the code in AIX/osi_groups.c.
894 /* copy cred to local address space */
895 cred = *xmem_userp->U_cred;
901 vm_det((void *)xmem_userp);
907 #elif defined(AFS_OSF_ENV)
/* OSF variant: returns the proc's cred only when the process is in a
 * stable state (SSLEEP/SRUN/SSTOP); body partially elided. */
908 const struct AFS_UCRED *afs_osi_proc2cred(AFS_PROC *pr)
910 struct AFS_UCRED *rv=NULL;
916 if((pr->p_stat == SSLEEP) ||
917 (pr->p_stat == SRUN) ||
918 (pr->p_stat == SSTOP))
923 #elif defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
/* Darwin/FreeBSD variant: copies uid/ngroups/groups from pc_ucred into
 * the static AFS_UCRED "cr", so the returned pointer is overwritten by
 * subsequent calls and must not be cached. */
924 const struct AFS_UCRED *afs_osi_proc2cred(AFS_PROC *pr)
926 struct AFS_UCRED *rv=NULL;
927 static struct AFS_UCRED cr;
933 if((pr->p_stat == SSLEEP) ||
934 (pr->p_stat == SRUN) ||
935 (pr->p_stat == SSTOP)) {
938 cr.cr_uid=pr->p_cred->pc_ucred->cr_uid;
939 cr.cr_ngroups=pr->p_cred->pc_ucred->cr_ngroups;
940 memcpy(cr.cr_groups, pr->p_cred->pc_ucred->cr_groups, NGROUPS *
949 const struct AFS_UCRED *afs_osi_proc2cred(AFS_PROC *pr)
951 struct AFS_UCRED *rv=NULL;
962 #endif /* AFS_GCPAGS */