/*
 * Copyright 2000, International Business Machines Corporation and others.
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 *
 * osi_vfsops.c for IRIX
 */
#include <afsconfig.h>
#include "afs/param.h"

#include "afs/sysincludes.h"    /* Standard vendor system headers */
#include "afsincludes.h"        /* Afs-based standard headers */
#include "afs/afs_stats.h"      /* statistics stuff */
#include "sys/syssgi.h"
struct vfs *afs_globalVFS = 0;
struct vcache *afs_globalVp = 0;
#ifdef AFS_SGI_VNODE_GLUE
#include <sys/invent.h>
mutex_t afs_init_kern_lock;
#define SYS_setgroups SGI_SETGROUPS

#include "sys/mload.h"
char *Afs_mversion = M_VERSION;

extern int (*setgroupsp) (int, gid_t *);
extern struct afs_lock afs_xvcache;
extern int idbg_afsuser();
extern void afs_mpservice(void *);
/*
 * AFS fs initialization - we also plug system calls here.
 */
#define NewSystemCall(n,f,a) \
    syscallsw[ABI_IRIX5].sc_sysent[(n)-1000].sy_narg = a; \
    syscallsw[ABI_IRIX5].sc_sysent[(n)-1000].sy_call = f; \
    syscallsw[ABI_IRIX5].sc_sysent[(n)-1000].sy_flags = 0;
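/*
 * NewSystemCall patches an entry of the IRIX5 ABI syscall switch table in
 * place: it installs handler f taking a arguments and clears the flags word.
 * The (n)-1000 index implies that the AFS syscall numbers used below
 * (AFS_SYSCALL, AFS_PIOCTL, AFS_SETPAG, ...) are taken to start at 1000.
 */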
extern struct vfsops Afs_vfsops, *afs_vfsopsp;
extern struct vnodeops Afs_vnodeops, *afs_vnodeopsp;
extern void (*afsidestroyp) (struct inode *);
extern void afsidestroy(struct inode *);
extern int (*idbg_prafsnodep) (vnode_t *);
extern int (*idbg_afsvfslistp) (void);
extern int idbg_prafsnode(vnode_t *);
extern int idbg_afsvfslist(void);
Afs_init(struct vfssw *vswp, int fstype)
    extern int Afs_syscall(), Afs_xsetgroups(), afs_pioctl(), afs_setpag();
    extern int icreate(), iopen(), iinc(), idec();
#ifdef AFS_SGI_XFS_IOPS_ENV
    extern int iread(), iwrite();
#ifdef AFS_SGI_VNODE_GLUE
    /* Synchronize doing NUMA test. */
    mutex_init(&afs_init_kern_lock, MUTEX_DEFAULT, "init_kern_lock");

    /* set up pointers from main kernel into us */
    afs_vnodeopsp = &Afs_vnodeops;
    afs_vfsopsp = &Afs_vfsops;
    afsidestroyp = afsidestroy;
    idbg_prafsnodep = idbg_prafsnode;
    idbg_afsvfslistp = idbg_afsvfslist;

    NewSystemCall(AFS_SYSCALL, Afs_syscall, 6);
    NewSystemCall(AFS_PIOCTL, afs_pioctl, 4);
    NewSystemCall(AFS_SETPAG, afs_setpag, 0);
    NewSystemCall(AFS_IOPEN, iopen, 3);
    NewSystemCall(AFS_ICREATE, icreate, 6);
    NewSystemCall(AFS_IINC, iinc, 3);
    NewSystemCall(AFS_IDEC, idec, 3);
#ifdef AFS_SGI_XFS_IOPS_ENV
    NewSystemCall(AFS_IOPEN64, iopen64, 4);
    NewSystemCall(AFS_IREAD, iread, 6);
    NewSystemCall(AFS_IWRITE, iwrite, 6);
    /* replace these last */
    setgroupsp = Afs_xsetgroups;

    idbg_addfunc("afsuser", idbg_afsuser);
extern int afs_mount(), afs_unmount(), afs_root(), afs_statfs();
extern int afs_sync(OSI_VFS_DECL(afsp), int flags, struct cred *cr);
extern int afs_sync(OSI_VFS_DECL(afsp), short flags, struct cred *cr);
extern int afs_vget(OSI_VFS_DECL(afsp), vnode_t ** vpp, struct fid *afidp);
struct vfsops afs_lockedvfsops =
struct vfsops Afs_vfsops =
    BHV_IDENTITY_INIT_POSITION(VFS_POSITION_BASE),
    fs_nosys,        /* rootinit */
    fs_nosys,        /* mntupdate */
    fs_nosys,        /* mountroot */
    fs_nosys,        /* realvfsops */
    fs_import,       /* import */
    fs_nosys,        /* quotactl */
    fs_nosys,        /* swapvp */
extern struct afs_q VLRU;    /* vcache LRU */

static bhv_desc_t afs_vfs_bhv;
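/*
 * afs_vfs_bhv is the behavior descriptor that afs_mount() chains onto the
 * vfs with vfs_insertbhv(), directing vfs operations for that mount to
 * Afs_vfsops; afs_unmount() detaches it again with VFS_REMOVEBHV().
 */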
afs_mount(struct vfs *afsp, vnode_t * mvp, struct mounta *uap,
    AFS_STATCNT(afs_mount);
    if (mvp->v_type != VDIR)
    if (afs_globalVFS) {    /* Don't allow remounts. */
    afs_globalVFS = afsp;
    afsp->vfs_bsize = 8192;
    afsp->vfs_fsid.val[0] = AFS_VFSMAGIC;    /* magic */
    afsp->vfs_fsid.val[1] = afs_fstype;
    vfs_insertbhv(afsp, &afs_vfs_bhv, &Afs_vfsops, &afs_vfs_bhv);
    afsp->vfs_data = NULL;
    afsp->vfs_fstype = afs_fstype;
    afsp->vfs_dev = 0xbabebabe;    /* XXX this should be unique */
afs_unmount(OSI_VFS_ARG(afsp), flags, cr)
    vnode_t *vp, *rootvp = NULL;
    OSI_VFS_CONVERT(afsp);
    AFS_STATCNT(afs_unmount);
    /*
     * flush all pages from inactive vnodes - return
     * EBUSY if any still in use
     */
    ObtainWriteLock(&afs_xvcache, 172);
    for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
        vp = (vnode_t *) tvc;
        if ((error = afs_FlushVCache(tvc, &fv_slept))) {
            if (vp->v_flag & VROOT) {
    ReleaseWriteLock(&afs_xvcache);
    /* rootvp gets lots of ref counts */
    tvc = VTOAFS(rootvp);
    if (tvc->opens || CheckLock(&tvc->lock) || LockWaiters(&tvc->lock)) {
        ReleaseWriteLock(&afs_xvcache);
    ReleaseWriteLock(&afs_xvcache);
    ObtainWriteLock(&afs_xvcache, 173);
    afs_FlushVCache(tvc, &fv_slept);
    ReleaseWriteLock(&afs_xvcache);
    VFS_REMOVEBHV(afsp, &afs_vfs_bhv);
afs_root(OSI_VFS_ARG(afsp), avpp)
    struct vrequest treq;
    struct vcache *tvp = 0;
    OSI_VFS_CONVERT(afsp);
    AFS_STATCNT(afs_root);
    if (afs_globalVp && (afs_globalVp->f.states & CStatd)) {
        afs_PutVCache(afs_globalVp);
    if (!(code = afs_InitReq(&treq, OSI_GET_CURRENT_CRED()))
        && !(code = afs_CheckInit())) {
        tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
        /* we really want this to stay around */
        VN_HOLD(AFSTOV(tvp));
        s = VN_LOCK(AFSTOV(tvp));
        AFSTOV(tvp)->v_flag |= VROOT;
        VN_UNLOCK(AFSTOV(tvp), s);
    afs_globalVFS = afsp;
    afs_Trace2(afs_iclSetp, CM_TRACE_VFSROOT, ICL_TYPE_POINTER, *avpp,
               ICL_TYPE_INT32, code);
afs_statfs(OSI_VFS_ARG(afsp), abp, avp)
     struct vnode *avp;        /* unused */
    OSI_VFS_CONVERT(afsp);
    AFS_STATCNT(afs_statfs);
    abp->f_bsize = afsp->vfs_bsize;
    abp->f_frsize = afsp->vfs_bsize;
    /* Fake a high number below to satisfy programs that use the statfs
     * call to make sure that there's enough space in the device partition
     * before storing something there.
     */
    abp->f_blocks = abp->f_bfree = abp->f_bavail = abp->f_files =
        abp->f_ffree = abp->f_favail = AFS_VFS_FAKEFREE;
    abp->f_fsid = AFS_VFSMAGIC;    /* magic */
    strcpy(abp->f_basetype, AFS_MOUNT_AFS);
    abp->f_namemax = 256;
/*
 * sync's responsibilities include pushing back DELWRI pages.
 * Things to watch out for:
 *   1) don't want to hold off new vnodes in the file system
 *      while pushing back pages
 *   2) since we can deal with un-referenced vnodes, need to watch
 *      races with folks who recycle vnodes
 *
 * SYNC_BDFLUSH - do NOT sleep waiting for an inode - also, when
 *      pushing DELWRI pages, only push old ones.
 * SYNC_PDFLUSH - push v_dpages.
 * SYNC_ATTR   - sync attributes - note that ordering considerations
 *      dictate that we also flush dirty pages
 * SYNC_WAIT   - do synchronous writes - inode & delwri
 * SYNC_NOWAIT - start delayed writes.
 * SYNC_DELWRI - look at inodes w/ delwri pages.  Other flags
 *      decide how to deal with them.
 * SYNC_CLOSE  - flush delwri and invalidate others.
 * SYNC_FSDATA - push fs data (e.g. superblocks)
 */
extern afs_int32 vcachegen;
#define PREEMPT_MASK 0x7f
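/*
 * vcachegen is a generation counter for the vcache list: afs_sync()
 * snapshots it in lvcachegen before dropping afs_xvcache and, if it has
 * changed by the time the lock is re-taken, restarts its scan of the VLRU.
 * PREEMPT_MASK makes that scan come up for air every 128 vnodes, i.e.
 * whenever (++preempt & PREEMPT_MASK) wraps to zero.
 */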
afs_sync(OSI_VFS_DECL(afsp),
    /* Why enable the vfs sync operation?? */
    int error, lasterr, preempt;
    afs_uint32 lvcachegen;
    OSI_VFS_CONVERT(afsp);
    /* if not interested in vnodes, skip all this */
    if ((flags & (SYNC_CLOSE | SYNC_DELWRI | SYNC_PDFLUSH)) == 0)
#else /* AFS_SGI61_ENV */
    if ((flags & (SYNC_CLOSE | SYNC_DELWRI | SYNC_ATTR)) == 0)
#endif /* AFS_SGI61_ENV */
    ObtainReadLock(&afs_xvcache);
    for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
        vp = (vnode_t *) tvc;
        /*
         * Since we push all dirty pages on last close/VOP_INACTIVE
         * we are only concerned with vnodes with
         * active reference counts.
         */
        if (vp->v_count == 0) {
        if ((flags & SYNC_CLOSE) == 0 && !AFS_VN_DIRTY(vp)) {
        /* ignore vnodes which need no flushing */
        if (flags & SYNC_DELWRI) {
            if (!AFS_VN_DIRTY(vp)) {
        else if (flags & SYNC_PDFLUSH) {
            if (!VN_GET_DPAGES(vp)) {
#endif /* AFS_SGI61_ENV */
        lvcachegen = vcachegen;
        ReleaseReadLock(&afs_xvcache);
        /*
         * Try to lock rwlock without sleeping.  If we can't, we must
         * either skip this vnode (for bdflush/pdflush-style calls) or
         * block for the lock below.
         */
        if (afs_rwlock_nowait(vp, 1) == 0) {
            if (flags & (SYNC_BDFLUSH | SYNC_PDFLUSH))
#else /* AFS_SGI61_ENV */
            if (flags & SYNC_BDFLUSH)
#endif /* AFS_SGI61_ENV */
        ObtainReadLock(&afs_xvcache);
        if (vcachegen != lvcachegen) {
            ReleaseReadLock(&afs_xvcache);
        AFS_RWLOCK(vp, VRWLOCK_WRITE);
        if (flags & SYNC_CLOSE) {
            PFLUSHINVALVP(vp, (off_t) 0, (off_t) tvc->f.m.Length);
        else if (flags & SYNC_PDFLUSH) {
            if (VN_GET_DPAGES(vp)) {
                pdflush(vp, B_ASYNC);
#endif /* AFS_SGI61_ENV */
        if ((flags & SYNC_DELWRI) && AFS_VN_DIRTY(vp)) {
            PFLUSHVP(vp, (off_t) tvc->f.m.Length,
                     (flags & SYNC_WAIT) ? 0 : B_ASYNC, error);
#else /* AFS_SGI61_ENV */
            if (flags & SYNC_WAIT)
                /* push all and wait */
                PFLUSHVP(vp, (off_t) tvc->f.m.Length, (off_t) 0, error);
            else if (flags & SYNC_BDFLUSH) {
                error = pdflush(vp, B_ASYNC);
                /* push all but don't wait */
                PFLUSHVP(vp, (off_t) tvc->f.m.Length, (off_t) B_ASYNC, error);
#endif /* AFS_SGI61_ENV */
        /*
         * Release vp, check error and whether to preempt, and if
         * we let go of xvcache lock and someone has changed the
         * VLRU, restart the loop
         */
        AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
        if ((++preempt & PREEMPT_MASK) == 0) {
            ObtainReadLock(&afs_xvcache);
            if (vcachegen != lvcachegen) {
                ReleaseReadLock(&afs_xvcache);
    ReleaseReadLock(&afs_xvcache);
afs_vget(OSI_VFS_DECL(afsp), vnode_t ** avcp, struct fid * fidp)
    struct VenusFid vfid;
    struct vrequest treq;
#if defined(AFS_SGI64_ENV) && defined(CKPT) && !defined(_R5000_CVT_WAR)
    OSI_VFS_CONVERT(afsp);
    AFS_STATCNT(afs_vget);
#if defined(AFS_SGI64_ENV) && defined(CKPT) && !defined(_R5000_CVT_WAR)
    afid2 = (afs_fid2_t *) fidp;
    if (afid2->af_len == sizeof(afs_fid2_t) - sizeof(afid2->af_len)) {
        /* It's a checkpoint restart fid. */
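        /*
         * The checkpoint-restart fid carries a cell index, volume, vnode
         * and uniquifier; the lines below repack those fields into a
         * VenusFid and look the vcache up with afs_GetVCache().
         */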
        tcell = afs_GetCellByIndex(afid2->af_cell, READ_LOCK);
        vfid.Cell = tcell->cellNum;
        afs_PutCell(tcell, READ_LOCK);
        vfid.Fid.Volume = afid2->af_volid;
        vfid.Fid.Vnode = afid2->af_vno;
        vfid.Fid.Unique = afid2->af_uniq;
        if ((code = afs_InitReq(&treq, OSI_GET_CURRENT_CRED())))
            (vnode_t *) afs_GetVCache(&vfid, &treq, NULL, (struct vcache *)0);
    if ((code = afs_InitReq(&treq, OSI_GET_CURRENT_CRED())))
    code = afs_osi_vget((struct vcache **)avcp, fidp, &treq);
    afs_Trace3(afs_iclSetp, CM_TRACE_VGET, ICL_TYPE_POINTER, *avcp,
               ICL_TYPE_INT32, treq.uid, ICL_TYPE_FID, &vfid);
    code = afs_CheckCode(code, &treq, 42);
#ifdef MP    /* locked versions of vfs operations. */

/* wrappers for vfs calls */
#define AFS_MP_VFS_ARG(A) bhv_desc_t A
#define AFS_MP_VFS_ARG(A) struct vfs A
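/*
 * AFS_MP_VFS_ARG spells the type of the first argument of a vfs entry
 * point, either a behavior descriptor or a plain struct vfs, depending on
 * which of the two definitions above is in effect.  The mp_afs_* wrappers
 * below delegate to the corresponding afs_lockedvfsops entries; presumably
 * they exist so the "locked versions of vfs operations" can be invoked with
 * the appropriate locking around each call.
 */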
mp_afs_mount(struct vfs *a, struct vnode *b, struct mounta *c,
    rv = afs_lockedvfsops.vfs_mount(a, b, c, d

mp_afs_unmount(AFS_MP_VFS_ARG(*a), int b, struct cred *c)
    rv = afs_lockedvfsops.vfs_unmount(a, b, c);

mp_afs_root(AFS_MP_VFS_ARG(*a), struct vnode **b)
    rv = afs_lockedvfsops.vfs_root(a, b);

mp_afs_statvfs(AFS_MP_VFS_ARG(*a), struct statvfs *b, struct vnode *c)
    rv = afs_lockedvfsops.vfs_statvfs(a, b, c);

mp_afs_sync(AFS_MP_VFS_ARG(*a),
    rv = afs_lockedvfsops.vfs_sync(a, b, c);

mp_afs_vget(AFS_MP_VFS_ARG(*a), struct vnode **b, struct fid *c)
    rv = afs_lockedvfsops.vfs_vget(a, b, c);
struct vfsops Afs_vfsops = {
    BHV_IDENTITY_INIT_POSITION(VFS_POSITION_BASE),
    fs_nosys,        /* rootinit */
    fs_nosys,        /* mntupdate */
    fs_nosys,        /* mountroot */
    fs_nosys,        /* realvfsops */
    fs_import,       /* import */
    fs_nosys,        /* quotactl */
    fs_nosys,        /* swapvp */