2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
15 #include <afsconfig.h>
16 #include "afs/param.h"
21 #include "afs/sysincludes.h" /* Standard vendor system headers */
22 #include "afsincludes.h" /* Afs-based standard headers */
23 #include "afs/afs_stats.h" /* statistics */
24 #include "afs/afs_cbqueue.h"
25 #include "afs/nfsclient.h"
26 #include "afs/afs_osidnlc.h"
28 /* Static prototypes */
29 static int HandleGetLock(register struct vcache *avc,
30 register struct AFS_FLOCK *af,
31 register struct vrequest *areq, int clid);
32 static int GetFlockCount(struct vcache *avc, struct vrequest *areq);
33 static int lockIdcmp2(struct AFS_FLOCK *flock1, struct vcache *vp,
34 register struct SimpleLocks *alp, int onlymine,
36 static void DoLockWarning(void);
38 /* int clid; * non-zero on SGI, OSF, SunOS, Darwin, xBSD ** XXX ptr type */
/*
 * lockIdSet -- record the identity of the current lock holder.
 *
 * Stamps the calling process's pid (and, on platforms that track remote
 * callers, its sysid) into either the SimpleLocks entry 'slp' or the
 * AFS_FLOCK 'flock'.  Which kernel structure supplies that identity
 * (curthread, the u-area, an SGI flid, or getpid()) is entirely
 * platform-dependent, hence the #ifdef ladder below.
 * 'clid' is only non-zero on SGI/OSF/SunOS/Darwin/xBSD (see the comment
 * above the forward declarations).
 */
40 lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
42 #if defined(AFS_SUN5_ENV)
43     register proc_t *procp = ttoproc(curthread);
45 #if !defined(AFS_AIX41_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_SGI65_ENV) && !defined(AFS_DARWIN_ENV) && !defined(AFS_XBSD_ENV)
47     struct proc *procp = OSI_GET_CURRENT_PROCP();
49     struct proc *procp = u.u_procp;
50 #endif /* AFS_SGI_ENV */
53 #if defined(AFS_SGI65_ENV)
55     get_current_flid(&flid);
/* Owner identity recorded into the SimpleLocks entry. */
64             slp->sysid = u.u_sysid;
68 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV)
71             slp->pid = procp->p_pid;
73             slp->sysid = procp->p_sysid;
74             slp->pid = procp->p_epid;
77 #if defined(AFS_SGI_ENV)
79             slp->sysid = flid.fl_sysid;
81             slp->sysid = OSI_GET_CURRENT_SYSID();
85 #if defined(AFS_SUN_ENV) || defined(AFS_OSF_ENV) || defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
88 #if defined(AFS_LINUX20_ENV) || defined(AFS_HPUX_ENV)
91             slp->pid = u.u_procp->p_pid;
94 #endif /* AFS_AIX_ENV */
95 #endif /* AFS_AIX32_ENV */
/* Owner identity recorded into the AFS_FLOCK itself. */
98 #if defined(AFS_AIX32_ENV)
101     flock->l_pid = getpid();
103     flock->l_sysid = u.u_sysid;
104     flock->l_pid = u.u_epid;
107 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV)
110     flock->l_pid = procp->p_pid;
112     flock->l_sysid = procp->p_sysid;
113     flock->l_pid = procp->p_epid;
116 #if defined(AFS_SGI_ENV)
118     flock->l_sysid = flid.fl_sysid;
120     flock->l_sysid = OSI_GET_CURRENT_SYSID();
124 #if defined(AFS_SUN_ENV) || defined(AFS_OSF_ENV) || defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
127 #if defined(AFS_LINUX20_ENV) || defined(AFS_HPUX_ENV)
128     flock->l_pid = getpid();
130     flock->l_pid = u.u_procp->p_pid;
134 #endif /* AFS_AIX_ENV */
135 #endif /* AFS_AIX32_ENV */
139 /* return 1 (true) if specified flock does not match alp (if
140 * specified), or any of the slp structs (if alp == 0)
142 /* I'm not sure that the comparison of flock->pid to p_ppid
143  * is correct. Should that be a comparison of alp (or slp) ->pid
144 * to p_ppid? Especially in the context of the lower loop, where
145 * the repeated comparison doesn't make much sense...
147 /* onlymine - don't match any locks which are held by my parent */
148 /* clid - only irix 6.5 */
/*
 * lockIdcmp2 -- test whether 'flock1' identifies a matching lock owner.
 *
 * Returns 0 when flock1's owner matches 'alp' (or, when alp is NULL,
 * matches some entry on vp->slocks); returns 1 (failure) when nothing
 * matches -- see the comment block above.  'onlymine' suppresses
 * matching locks held by the caller's parent process; 'clid' is
 * consulted only on the irix-6.5-style platforms.
 */
151 lockIdcmp2(struct AFS_FLOCK *flock1, struct vcache *vp,
152            register struct SimpleLocks *alp, int onlymine, int clid)
154     register struct SimpleLocks *slp;
155 #if defined(AFS_SUN5_ENV)
156     register proc_t *procp = ttoproc(curthread);
158 #if !defined(AFS_AIX41_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_SGI65_ENV) && !defined(AFS_DARWIN_ENV) && !defined(AFS_XBSD_ENV)
160     struct proc *procp = curprocp;
161 #else /* AFS_SGI64_ENV */
162     struct proc *procp = u.u_procp;
163 #endif /* AFS_SGI64_ENV */
/* Compare against the single, explicitly supplied lock entry. */
168 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
169         if (flock1->l_sysid != alp->sysid) {
173         if ((flock1->l_pid == alp->pid) ||
174 #if defined(AFS_AIX41_ENV) || defined(AFS_LINUX20_ENV) || defined(AFS_HPUX_ENV)
175             (!onlymine && (flock1->l_pid == getppid()))
177 #if defined(AFS_SGI65_ENV) || defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
178             /* XXX check this. used to be *only* irix for some reason. */
179             (!onlymine && (flock1->l_pid == clid))
181             (!onlymine && (flock1->l_pid == procp->p_ppid))
/* No specific entry given: scan every simple lock on the vcache. */
190     for (slp = vp->slocks; slp; slp = slp->next) {
191 #if defined(AFS_HAVE_FLOCK_SYSID)
192         if (flock1->l_sysid != slp->sysid) {
196         if (flock1->l_pid == slp->pid) {
200     return (1);                 /* failure */
204 /* we don't send multiple read flocks to the server, but rather just count
205 them up ourselves. Of course, multiple write locks are incompatible.
207 Note that we should always try to release a lock, even if we have
208 a network problem sending the release command through, since often
209 a lock is released on a close call, when the user can't retry anyway.
211 After we remove it from our structure, the lock will no longer be
212 kept alive, and the server should time it out within a few minutes.
214 94.04.13 add "force" parameter. If a child explicitly unlocks a
215 file, I guess we'll permit it. however, we don't want simple,
216 innocent closes by children to unlock files in the parent process.
218 /* clid - nonzero on sgi sunos osf1 only */
/*
 * HandleFlock -- apply a whole-file (flock-style) lock operation to an
 * AFS vcache.
 *
 * 'acom' carries LOCK_UN / LOCK_SH / LOCK_EX, optionally with LOCK_NB.
 * avc->flockCount encodes the local lock state: N > 0 means N shared
 * (read) locks, -1 means the single exclusive (write) lock, 0 means
 * unlocked.  Fileserver RPCs (RXAFS_SetLock / RXAFS_ReleaseLock) are
 * issued only at 0 <-> nonzero transitions; additional shared locks are
 * merely counted locally (see the comment block above this function).
 * 'clid' is non-zero on sgi/sunos/osf1 only; 'onlymine' restricts
 * unlocking to locks owned by the caller itself, excluding its parent.
 * Returns 0 or an errno-style code via afs_CheckCode().
 */
220 HandleFlock(register struct vcache *avc, int acom, struct vrequest *areq,
221             pid_t clid, int onlymine)
224     struct SimpleLocks *slp, *tlp, **slpp;
226     struct AFSVolSync tsync;
228     struct AFS_FLOCK flock;
230     AFS_STATCNT(HandleFlock);
231     code = 0;               /* default when we don't make any network calls */
/* Record our own identity so ownership comparisons below work. */
232     lockIdSet(&flock, NULL, clid);
234 #if defined(AFS_SGI_ENV)
235     osi_Assert(valusema(&avc->vc_rwlock) <= 0);
236     osi_Assert(OSI_GET_LOCKID() == avc->vc_rwlockid);
238     ObtainWriteLock(&avc->lock, 118);
239     if (acom & LOCK_UN) {
244         /* If the lock is held exclusive, then only the owning process
245          * or a child can unlock it. Use pid and ppid because they are
246          * unique identifiers.
248         if ((avc->flockCount < 0) && (getpid() != avc->ownslock)) {
250             if (onlymine || (getppid() != avc->ownslock)) {
252             if (onlymine || (u.u_procp->p_ppid != avc->ownslock)) {
254                 ReleaseWriteLock(&avc->lock);
/* Refuse to unlock a lock we do not own. */
259         if (lockIdcmp2(&flock, avc, NULL, onlymine, clid)) {
260             ReleaseWriteLock(&avc->lock);
/* Nothing locked locally: the unlock is a no-op. */
266         if (avc->flockCount == 0) {
267             ReleaseWriteLock(&avc->lock);
271         /* unlock the lock */
272         if (avc->flockCount > 0) {
274             for (slp = *slpp; slp;) {
275                 if (!lockIdcmp2(&flock, avc, slp, onlymine, clid)) {
277                     tlp = *slpp = slp->next;
278                     osi_FreeSmallSpace(slp);
285         } else if (avc->flockCount == -1) {
286             afs_StoreAllSegments(avc, areq, AFS_ASYNC); /* fsync file early */
288             /* And remove the (only) exclusive lock entry from the list... */
289             osi_FreeSmallSpace(avc->slocks);
/* Last local lock is gone: tell the fileserver to drop its lock. */
292         if (avc->flockCount == 0) {
294                 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
296                     XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_RELEASELOCK);
298                     code = RXAFS_ReleaseLock(tc->id, (struct AFSFid *)
299                                              &avc->fid.Fid, &tsync);
305                  (tc, code, &avc->fid, areq,
306                   AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK, NULL));
309     while (1) {                 /* set a new lock */
311          * Upgrading from shared locks to Exclusive and vice versa
312          * is a bit tricky and we don't really support it yet. But
313          * we try to support the commonly used one which is upgrading
314          * a shared lock to an exclusive for the same process...
316         if ((avc->flockCount > 0 && (acom & LOCK_EX))
317             || (avc->flockCount == -1 && (acom & LOCK_SH))) {
319              * Upgrading from shared locks to an exclusive one:
320              * For now if all the shared locks belong to the
321              * same process then we unlock them on the server
322              * and proceed with the upgrade. Unless we change the
323              * server's locking interface impl we prohibit
324              * unlocking other processes' shared locks...
325              * Upgrading from an exclusive lock to a shared one:
326              * Again only allowed to be done by the same process.
329             for (slp = *slpp; slp;) {
331                 (&flock, avc, slp, 1 /*!onlymine */ , clid)) {
336                     tlp = *slpp = slp->next;
337                     osi_FreeSmallSpace(slp);
/* All of our own locks released: drop the server-side lock too. */
345             if (!code && avc->flockCount == 0) {
347                 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
350                     (AFS_STATS_FS_RPCIDX_RELEASELOCK);
353                     RXAFS_ReleaseLock(tc->id,
354                                       (struct AFSFid *)&avc->fid.
361                  (tc, code, &avc->fid, areq,
362                   AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK,
365             } else if (avc->flockCount == -1 && (acom & LOCK_EX)) {
366                 if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
372             /* compatible here, decide if needs to go to file server. If
373              * we've already got the file locked (and thus read-locked, since
374              * we've already checked for compatibility), we shouldn't send
375              * the call through to the server again */
376             if (avc->flockCount == 0) {
377                 /* we're the first on our block, send the call through */
378                 lockType = ((acom & LOCK_EX) ? LockWrite : LockRead);
380                 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
382                     XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_SETLOCK);
384                     code = RXAFS_SetLock(tc->id, (struct AFSFid *)
385                                          &avc->fid.Fid, lockType,
392                  (tc, code, &avc->fid, areq,
393                   AFS_STATS_FS_RPCIDX_SETLOCK, SHARED_LOCK,
396                 code = 0;       /* otherwise, pretend things worked */
/* Record the new lock in the local SimpleLocks list. */
399                 slp = (struct SimpleLocks *)
400                     osi_AllocSmallSpace(sizeof(struct SimpleLocks));
401                 if (acom & LOCK_EX) {
406                     /* Record unique id of process owning exclusive lock. */
407                     avc->ownslock = getpid();
410                     slp->type = LockWrite;
413                     avc->flockCount = -1;
415                     slp->type = LockRead;
416                     slp->next = avc->slocks;
421                 lockIdSet(&flock, slp, clid);
424         /* now, if we got EWOULDBLOCK, and we're supposed to wait, we do */
425         if (((code == EWOULDBLOCK) || (code == EAGAIN))
426             && !(acom & LOCK_NB)) {
427             /* sleep for a second, allowing interrupts */
428             ReleaseWriteLock(&avc->lock);
429 #if defined(AFS_SGI_ENV)
430             AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
432             code = afs_osi_Wait(1000, NULL, 1);
433 #if defined(AFS_SGI_ENV)
434             AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
436             ObtainWriteLock(&avc->lock, 120);
438                 code = EINTR;   /* return this if ^C typed */
445     ReleaseWriteLock(&avc->lock);
446     code = afs_CheckCode(code, areq, 1);        /* defeat a buggy AIX optimization */
451 /* warn a user that a lock has been ignored */
452 afs_int32 lastWarnTime = 0;     /* this is used elsewhere */
/*
 * DoLockWarning -- log, at most once every 120 seconds, that a
 * byte-range lock/unlock request was ignored.  The rate limit is
 * enforced through the global lastWarnTime above.
 * NOTE(review): 'now' is presumably set from the system clock on a line
 * elided from this view -- confirm against the full source.
 */
456     register afs_int32 now;
459     AFS_STATCNT(DoLockWarning);
460     /* check if we've already warned someone recently */
461     if (now < lastWarnTime + 120)
464     /* otherwise, it is time to nag the user */
467         ("afs: byte-range lock/unlock ignored; make sure no one else is running this program.\n");
/*
 * afs_lockctl -- VFS entry point for fcntl-style record locking
 * (F_GETLK / F_SETLK / F_SETLKW and the remote F_R* variants).
 *
 * The alternative signatures below select the platform-appropriate
 * argument list.  AFS supports only whole-file locks: a request that
 * covers the entire file (l_whence == l_start == l_len == 0) is
 * translated into the equivalent flock operation and handed to
 * HandleFlock(); true sub-range requests are not honored (see the
 * comment at the whence/start/len check below).  Returns 0 or an
 * errno-style code.
 */
472 int afs_lockctl(struct vcache * avc, struct eflock * af, int flag,
473                 struct AFS_UCRED * acred, pid_t clid, off_t offset)
474 #elif defined(AFS_SGI_ENV) || (defined(AFS_SUN_ENV) && !defined(AFS_SUN5_ENV)) || defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
475 int afs_lockctl(struct vcache * avc, struct AFS_FLOCK * af, int acmd,
476                 struct AFS_UCRED * acred, pid_t clid)
479 int afs_lockctl(struct vcache * avc, struct AFS_FLOCK * af, int acmd,
480                 struct AFS_UCRED * acred)
483     struct vrequest treq;
488     struct afs_fakestat_state fakestate;
490     AFS_STATCNT(afs_lockctl);
491     if ((code = afs_InitReq(&treq, acred)))
493     afs_InitFakeStat(&fakestate);
/* Resolve avc through fake-stat evaluation before operating on it. */
494     code = afs_EvalFakeStat(&avc, &fakestate, &treq);
496         afs_PutFakeStat(&fakestate);
500     if (flag & VNOFLCK) {
501         afs_PutFakeStat(&fakestate);
504     if (flag & CLNFLCK) {
506     } else if ((flag & GETFLCK) || (flag & RGETFLCK)) {
508     } else if ((flag & SETFLCK) || (flag & RSETFLCK)) {
512 #if (defined(AFS_SUN_ENV) || defined(AFS_SGI_ENV) || defined(AFS_SUN5_ENV)) && !defined(AFS_SUN58_ENV)
513     if ((acmd == F_GETLK) || (acmd == F_RGETLK)) {
515     if (acmd == F_GETLK) {
517         if (af->l_type == F_UNLCK) {
518             afs_PutFakeStat(&fakestate);
521 #ifndef AFS_OSF_ENV     /* getlock is a no-op for osf (for now) */
522         code = HandleGetLock(avc, af, &treq, clid);
524         code = afs_CheckCode(code, &treq, 2);   /* defeat buggy AIX optimz */
525         afs_PutFakeStat(&fakestate);
527     } else if ((acmd == F_SETLK) || (acmd == F_SETLKW)
528 #if (defined(AFS_SUN_ENV) || defined(AFS_SGI_ENV) || defined(AFS_SUN5_ENV)) && !defined(AFS_SUN58_ENV)
529                || (acmd == F_RSETLK) || (acmd == F_RSETLKW)) {
533         /* this next check is safer when left out, but more applications work
534          * with it in. However, they fail in race conditions. The question is
535          * what to do for people who don't have source to their application;
536          * this way at least, they can get work done */
/* Normalize the platform-specific "to end of file" encodings of l_len. */
537 #ifdef AFS_LINUX24_ENV
538         if (af->l_len == OFFSET_MAX)
539             af->l_len = 0;      /* since some systems indicate it as EOF */
541         if (af->l_len == 0x7fffffff)
542             af->l_len = 0;      /* since some systems indicate it as EOF */
543 #ifdef AFS_LINUX_64BIT_KERNEL
544         if (af->l_len == LONG_MAX)
545             af->l_len = 0;      /* since some systems indicate it as EOF */
548         /* next line makes byte range locks always succeed,
549          * even when they should block */
550         if (af->l_whence != 0 || af->l_start != 0 || af->l_len != 0) {
552             afs_PutFakeStat(&fakestate);
555         /* otherwise we can turn this into a whole-file flock */
556         if (af->l_type == F_RDLCK)
558         else if (af->l_type == F_WRLCK)
560         else if (af->l_type == F_UNLCK)
563             afs_PutFakeStat(&fakestate);
564             return EINVAL;      /* unknown lock type */
/* F_SETLK (non-waiting) maps to a non-blocking flock request. */
566         if (((acmd == F_SETLK)
567 #if (defined(AFS_SGI_ENV) || defined(AFS_SUN_ENV)) && !defined(AFS_SUN58_ENV)
568              || (acmd == F_RSETLK)
570              ) && code != LOCK_UN)
571             code |= LOCK_NB;    /* non-blocking, s.v.p. */
572 #if (defined(AFS_SUN_ENV) && !defined(AFS_SUN5_ENV)) || defined(AFS_OSF_ENV)
573         code = HandleFlock(avc, code, &treq, clid, 0 /*!onlymine */ );
574 #elif defined(AFS_SGI_ENV)
575         AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
576         code = HandleFlock(avc, code, &treq, clid, 0 /*!onlymine */ );
577         AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
579         code = HandleFlock(avc, code, &treq, 0, 0 /*!onlymine */ );
581     code = afs_CheckCode(code, &treq, 3);       /* defeat AIX -O bug */
582     afs_PutFakeStat(&fakestate);
585     afs_PutFakeStat(&fakestate);
591 * Get a description of the first lock which would
592 * block the lock specified. If the specified lock
593 * would succeed, fill in the lock structure with 'F_UNLCK'.
595 * To do that, we have to ask the server for the lock
597 * 1. The file is not locked by this machine.
598 * 2. Asking for write lock, and only the current
599 * PID has the file read locked.
601 #ifndef AFS_OSF_ENV     /* getlock is a no-op for osf (for now) */
/*
 * HandleGetLock -- implement F_GETLK: describe the first lock that
 * would block the request in 'af'; if the request would succeed, set
 * af->l_type = F_UNLCK (see the comment block above this function).
 *
 * Uses the local avc->flockCount / avc->slocks state first; when the
 * file is not locked by this machine it falls back to asking the
 * fileserver via GetFlockCount(), which cannot report the remote pid.
 */
603 HandleGetLock(register struct vcache *avc, register struct AFS_FLOCK *af,
604               register struct vrequest *areq, int clid)
606     register afs_int32 code;
607     struct AFS_FLOCK flock;
/* Stamp our own identity for the ownership comparisons below. */
609     lockIdSet(&flock, NULL, clid);
611     ObtainWriteLock(&avc->lock, 122);
612     if (avc->flockCount == 0) {
614          * We don't know ourselves, so ask the server. Unfortunately, we
615          * don't know the pid. Not even the server knows the pid. Besides,
616          * the process with the lock is on another machine
618         code = GetFlockCount(avc, areq);
619         if (code == 0 || (af->l_type == F_RDLCK && code > 0)) {
620             af->l_type = F_UNLCK;
624             af->l_type = F_RDLCK;
626             af->l_type = F_WRLCK;
629 #if defined(AFS_HAVE_FLOCK_SYSID)
/* The file is locked locally; decide whether 'af' would be blocked. */
635     if (af->l_type == F_RDLCK) {
637          * We want a read lock. If there are only
638          * read locks, or we are the one with the
639          * write lock, say it is unlocked.
641         if (avc->flockCount > 0 ||      /* only read locks */
642             !lockIdcmp2(&flock, avc, NULL, 1, clid)) {
643             af->l_type = F_UNLCK;
647         /* one write lock, but who? */
648         af->l_type = F_WRLCK;   /* not us, so lock would block */
649         if (avc->slocks) {      /* we know who, so tell */
650             af->l_pid = avc->slocks->pid;
651 #if defined(AFS_HAVE_FLOCK_SYSID)
652             af->l_sysid = avc->slocks->sysid;
655             af->l_pid = 0;      /* XXX can't happen?? */
656 #if defined(AFS_HAVE_FLOCK_SYSID)
664          * Ok, we want a write lock. If there is a write lock
665          * already, and it is not this process, we fail.
667         if (avc->flockCount < 0) {
668             if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
669                 af->l_type = F_WRLCK;
671                     af->l_pid = avc->slocks->pid;
672 #if defined(AFS_HAVE_FLOCK_SYSID)
673                     af->l_sysid = avc->slocks->sysid;
676                     af->l_pid = 0;      /* XXX can't happen?? */
677 #if defined(AFS_HAVE_FLOCK_SYSID)
683             /* we are the one with the write lock */
684             af->l_type = F_UNLCK;
689          * Want a write lock, and we know there are read locks.
690          * If there is more than one, or it isn't us, we cannot lock.
692         if ((avc->flockCount > 1)
693             || lockIdcmp2(&flock, avc, NULL, 1, clid)) {
694             struct SimpleLocks *slp;
696             af->l_type = F_RDLCK;
698 #if defined(AFS_HAVE_FLOCK_SYSID)
701             /* find a pid that isn't our own */
702             for (slp = avc->slocks; slp; slp = slp->next) {
703                 if (lockIdcmp2(&flock, NULL, slp, 1, clid)) {
704                     af->l_pid = slp->pid;
705 #if defined(AFS_HAVE_FLOCK_SYSID)
706                     af->l_sysid = avc->slocks->sysid;
/* Platform-variant copy of the write-lock logic above (arms elided). */
715          * Ok, we want a write lock. If there is a write lock
716          * already, and it is not this process, we fail.
718         if (avc->flockCount < 0) {
719             if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
720                 af->l_type = F_WRLCK;
722                     af->l_pid = avc->slocks->pid;
723 #if defined(AFS_HAVE_FLOCK_SYSID)
724                     af->l_sysid = avc->slocks->sysid;
727                     af->l_pid = 0;      /* XXX can't happen?? */
728 #if defined(AFS_HAVE_FLOCK_SYSID)
734             /* we are the one with the write lock */
735             af->l_type = F_UNLCK;
740          * Want a write lock, and we know there are read locks.
741          * If there is more than one, or it isn't us, we cannot lock.
743         if ((avc->flockCount > 1)
744             || lockIdcmp2(&flock, avc, NULL, 1, clid)) {
745             struct SimpleLocks *slp;
746             af->l_type = F_RDLCK;
748 #if defined(AFS_HAVE_FLOCK_SYSID)
751             /* find a pid that isn't our own */
752             for (slp = avc->slocks; slp; slp = slp->next) {
753                 if (lockIdcmp2(&flock, NULL, slp, 1, clid)) {
754                     af->l_pid = slp->pid;
755 #if defined(AFS_HAVE_FLOCK_SYSID)
756                     af->l_sysid = avc->slocks->sysid;
765          * Want a write lock, and there is just one read lock, and it
766          * is this process with a read lock. Ask the server if there
767          * are any more processes with the file locked.
769         code = GetFlockCount(avc, areq);
770         if (code == 0 || code == 1) {
771             af->l_type = F_UNLCK;
775             af->l_type = F_RDLCK;
777             af->l_type = F_WRLCK;
779 #if defined(AFS_HAVE_FLOCK_SYSID)
/* AFS locks always cover the whole file. */
786     af->l_len = 0;              /* to end of file */
789     ReleaseWriteLock(&avc->lock);
793 /* Get the 'flock' count from the server. This comes back in a 'spare'
794 * field from a GetStatus RPC. If we have any problems with the RPC,
795 * we lie and say the file is unlocked. If we ask any 'old' fileservers,
796 * the spare field will be a zero, saying the file is unlocked. This is
797 * OK, as a further 'lock' request will do the right thing.
/*
 * GetFlockCount -- ask the fileserver how many processes hold the file
 * locked, via the lockCount field of a FetchStatus reply (see the
 * comment block above).  On any RPC failure this lies and returns 0
 * ("unlocked"); a later lock request will then do the right thing.
 */
800 GetFlockCount(struct vcache *avc, struct vrequest *areq)
802     register struct conn *tc;
803     register afs_int32 code;
804     struct AFSFetchStatus OutStatus;
805     struct AFSCallBack CallBack;
806     struct AFSVolSync tsync;
/* Force the request non-blocking for this RPC; 'temp' preserves the
 * caller's original O_NONBLOCK bit so it is restored below. */
809     temp = areq->flags & O_NONBLOCK;
810     areq->flags |= O_NONBLOCK;
813         tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
815             XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
818                 RXAFS_FetchStatus(tc->id, (struct AFSFid *)&avc->fid.Fid,
819                                   &OutStatus, &CallBack, &tsync);
825          (tc, code, &avc->fid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
829         areq->flags &= ~O_NONBLOCK;
832         return (0);     /* failed, say it is 'unlocked' */
834     return ((int)OutStatus.lockCount);
840 #if !defined(AFS_AIX_ENV) && !defined(AFS_HPUX_ENV) && !defined(AFS_SUN5_ENV) && !defined(AFS_SGI_ENV) && !defined(UKERNEL) && !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN_ENV) && !defined(AFS_XBSD_ENV)
841 /* Flock is not supported on System V systems */
843 extern struct fileops afs_fileops;
/*
 * afs_xflock -- flock() system-call intercept.
 *
 * Looks up the file descriptor; if it refers to an AFS vnode the lock
 * request is serviced by HandleFlock() and the fd's FSHLOCK/FEXLOCK
 * flags are maintained here, otherwise the request falls through to
 * the native flock() implementation.  Any lock already held on the fd
 * is released first so a re-lock behaves as a fresh request.
 */
846 afs_xflock(struct proc *p, void *args, int *retval)
847 #else /* AFS_OSF_ENV */
858     struct vrequest treq;
861     struct afs_fakestat_state fakestate;
863     afs_InitFakeStat(&fakestate);
864     AFS_STATCNT(afs_xflock);
867     uap = (struct a *)args;
868     getf(&fd, uap->fd, FILE_FLAGS_NULL, &u.u_file_state);
869 #else /* AFS_OSF_ENV */
870     uap = (struct a *)u.u_ap;
874         afs_PutFakeStat(&fakestate);
/* flockDone doubles as the error code from afs_InitReq here. */
878     if (flockDone = afs_InitReq(&treq, u.u_cred)) {
879         afs_PutFakeStat(&fakestate);
882     /* first determine whether this is any sort of vnode */
883     if (fd->f_type == DTYPE_VNODE) {
884         /* good, this is a vnode; next see if it is an AFS vnode */
885         tvc = VTOAFS(fd->f_data);       /* valid, given a vnode */
886         if (IsAfsVnode(AFSTOV(tvc))) {
887             /* This is an AFS vnode, so do the work */
889             /* find real vcache entry; shouldn't be null if gnode ref count
892             tvc = VTOAFS(afs_gntovn) (tvc);
895                 afs_PutFakeStat(&fakestate);
899             code = afs_EvalFakeStat(&tvc, &fakestate, &treq);
901                 afs_PutFakeStat(&fakestate);
904             if ((fd->f_flag & (FEXLOCK | FSHLOCK)) && !(uap->com & LOCK_UN)) {
905                 /* First, if fd already has lock, release it for relock path */
906 #if defined(AFS_SGI_ENV) || defined(AFS_OSF_ENV) || (defined(AFS_SUN_ENV) && !defined(AFS_SUN5_ENV))
907                 HandleFlock(tvc, LOCK_UN, &treq, u.u_procp->p_pid,
910                 HandleFlock(tvc, LOCK_UN, &treq, 0, 0 /*!onlymine */ );
912                 fd->f_flag &= ~(FEXLOCK | FSHLOCK);
914             /* now try the requested operation */
916 #if defined(AFS_SGI_ENV) || defined(AFS_OSF_ENV) || (defined(AFS_SUN_ENV) && !defined(AFS_SUN5_ENV))
918                 HandleFlock(tvc, uap->com, &treq, u.u_procp->p_pid,
921             code = HandleFlock(tvc, uap->com, &treq, 0, 0 /*!onlymine */ );
/* Keep the fd's lock-state flags in sync with the outcome. */
927             if (uap->com & LOCK_UN) {
929                 fd->f_flag &= ~(FEXLOCK | FSHLOCK);
933 #else /* AFS_OSF_ENV */
936                 if (uap->com & LOCK_SH)
937                     fd->f_flag |= FSHLOCK;
938                 else if (uap->com & LOCK_EX)
939                     fd->f_flag |= FEXLOCK;
943             fd->f_ops = &afs_fileops;
/* Not an AFS file: defer to the native flock() implementation. */
948         code = flock(p, args, retval);
954     afs_PutFakeStat(&fakestate);
956 #else /* AFS_OSF_ENV */
959     (*afs_longcall_procs.LC_flock) ();
963     afs_PutFakeStat(&fakestate);
967 #endif /* !defined(AFS_AIX_ENV) && !defined(AFS_HPUX_ENV) && !defined(AFS_SUN5_ENV) && !defined(UKERNEL) && !defined(AFS_LINUX20_ENV) */