2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
15 #include <afsconfig.h>
16 #include "afs/param.h"
21 #include "afs/sysincludes.h" /* Standard vendor system headers */
22 #include "afsincludes.h" /* Afs-based standard headers */
23 #include "afs/afs_stats.h" /* statistics */
24 #include "afs/afs_cbqueue.h"
25 #include "afs/nfsclient.h"
26 #include "afs/afs_osidnlc.h"
/*
 * Forward declarations for the file-scope helpers defined below.
 * NOTE(review): this chunk is a fragmented extract of the original file;
 * the lockIdcmp2 prototype below is visibly incomplete (its final
 * parameter/closing line is missing from this view) -- confirm against
 * the full source before editing.
 */
28 /* Static prototypes */
29 static int HandleGetLock(register struct vcache *avc,
30 register struct AFS_FLOCK *af,
31 register struct vrequest *areq, int clid);
32 static int GetFlockCount(struct vcache *avc, struct vrequest *areq);
33 static int lockIdcmp2(struct AFS_FLOCK *flock1, struct vcache *vp,
34 register struct SimpleLocks *alp, int onlymine,
36 static void DoLockWarning(void);
38 /* int clid; * non-zero on SGI, OSF, SunOS, Darwin, xBSD ** XXX ptr type */
/*
 * lockIdSet -- stamp the identity of the current process (pid, and on
 * platforms that have one, a sysid) into either a SimpleLocks entry
 * (when slp != NULL) or the given AFS_FLOCK (when slp == NULL).
 * Which process fields are used is entirely platform-dependent, hence
 * the #ifdef maze: Solaris reads the proc from curthread, SGI 6.5 uses
 * get_current_flid(), AIX uses u-area fields, Linux/HP-UX use getpid().
 * NOTE(review): many original lines (#else/#endif pairs, the flid
 * declaration, several assignments) are missing from this extract; the
 * visible preprocessor structure is incomplete -- verify nesting against
 * the full source before changing anything here.
 */
40 lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
42 #if defined(AFS_SUN5_ENV)
43 register proc_t *procp = ttoproc(curthread);
45 #if !defined(AFS_AIX41_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_SGI65_ENV) && !defined(AFS_DARWIN_ENV) && !defined(AFS_XBSD_ENV)
47 struct proc *procp = OSI_GET_CURRENT_PROCP();
49 struct proc *procp = u.u_procp;
50 #endif /* AFS_SGI_ENV */
53 #if defined(AFS_SGI65_ENV)
55 get_current_flid(&flid);
/* First half: fill in the SimpleLocks entry (slp != NULL case). */
64 slp->sysid = u.u_sysid;
68 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV)
71 slp->pid = procp->p_pid;
73 slp->sysid = procp->p_sysid;
74 slp->pid = procp->p_epid;
77 #if defined(AFS_SGI_ENV)
79 slp->sysid = flid.fl_sysid;
81 slp->sysid = OSI_GET_CURRENT_SYSID();
85 #if defined(AFS_SUN_ENV) || defined(AFS_OSF_ENV) || defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
88 #if defined(AFS_LINUX20_ENV) || defined(AFS_HPUX_ENV)
91 slp->pid = u.u_procp->p_pid;
94 #endif /* AFS_AIX_ENV */
95 #endif /* AFS_AIX32_ENV */
/* Second half: fill in the AFS_FLOCK (slp == NULL case); mirrors the
 * platform selection above but writes flock->l_pid / flock->l_sysid. */
98 #if defined(AFS_AIX32_ENV)
101 flock->l_pid = getpid();
103 flock->l_sysid = u.u_sysid;
104 flock->l_pid = u.u_epid;
107 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV)
110 flock->l_pid = procp->p_pid;
112 flock->l_sysid = procp->p_sysid;
113 flock->l_pid = procp->p_epid;
116 #if defined(AFS_SGI_ENV)
118 flock->l_sysid = flid.fl_sysid;
120 flock->l_sysid = OSI_GET_CURRENT_SYSID();
124 #if defined(AFS_SUN_ENV) || defined(AFS_OSF_ENV) || defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
127 #if defined(AFS_LINUX20_ENV) || defined(AFS_HPUX_ENV)
128 flock->l_pid = getpid();
130 flock->l_pid = u.u_procp->p_pid;
134 #endif /* AFS_AIX_ENV */
135 #endif /* AFS_AIX32_ENV */
/*
 * lockIdcmp2 -- lock-ownership comparison.
 * Returns 0 when flock1's owner matches the lock(s) being examined,
 * 1 ("failure") when no match is found.  When alp != NULL only that
 * single SimpleLocks entry is compared; when alp == NULL the whole
 * vp->slocks chain is scanned.
 * NOTE(review): fragmented extract -- return statements, braces and
 * several #else/#endif lines are missing from this view.
 */
139 /* return 1 (true) if specified flock does not match alp (if
140 * specified), or any of the slp structs (if alp == 0)
142 /* I'm not sure that the comparison of flock->pid to p_ppid
143 * is correct. Should that be a comparison of alp (or slp) ->pid
144 * to p_ppid? Especially in the context of the lower loop, where
145 * the repeated comparison doesn't make much sense...
147 /* onlymine - don't match any locks which are held by my parent */
148 /* clid - only irix 6.5 */
151 lockIdcmp2(struct AFS_FLOCK *flock1, struct vcache *vp,
152 register struct SimpleLocks *alp, int onlymine, int clid)
154 register struct SimpleLocks *slp;
155 #if defined(AFS_SUN5_ENV)
156 register proc_t *procp = ttoproc(curthread);
158 #if !defined(AFS_AIX41_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_SGI65_ENV) && !defined(AFS_DARWIN_ENV) && !defined(AFS_XBSD_ENV)
160 struct proc *procp = curprocp;
161 #else /* AFS_SGI64_ENV */
162 struct proc *procp = u.u_procp;
163 #endif /* AFS_SGI64_ENV */
/* Single-entry comparison path (alp supplied by the caller). */
168 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
169 if (flock1->l_sysid != alp->sysid) {
173 if ((flock1->l_pid == alp->pid) ||
174 #if defined(AFS_AIX41_ENV) || defined(AFS_LINUX20_ENV) || defined(AFS_HPUX_ENV)
175 (!onlymine && (flock1->l_pid == getppid()))
177 #if defined(AFS_SGI65_ENV) || defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
178 /* XXX check this. used to be *only* irix for some reason. */
179 (!onlymine && (flock1->l_pid == clid))
181 (!onlymine && (flock1->l_pid == procp->p_ppid))
/* Chain-scan path: walk every SimpleLocks entry on the vcache. */
190 for (slp = vp->slocks; slp; slp = slp->next) {
191 #if defined(AFS_HAVE_FLOCK_SYSID)
192 if (flock1->l_sysid != slp->sysid) {
196 if (flock1->l_pid == slp->pid) {
200 return (1); /* failure */
/*
 * HandleFlock -- core whole-file lock/unlock engine.
 * acom carries BSD flock bits (LOCK_UN / LOCK_SH / LOCK_EX / LOCK_NB);
 * avc->flockCount encodes local state: >0 = that many read locks,
 * -1 = one exclusive lock, 0 = unlocked.  Read locks are counted locally
 * and only the first one is sent to the server; the exclusive path talks
 * to the server via RXAFS_SetLock / RXAFS_ReleaseLock.  On EWOULDBLOCK/
 * EAGAIN without LOCK_NB the routine drops avc->lock, sleeps ~1s, and
 * retries inside the while(1) loop.
 * NOTE(review): fragmented extract -- braces, afs_Analyze loops, and
 * several assignments are missing; do not restructure from this view.
 */
204 /* we don't send multiple read flocks to the server, but rather just count
205 them up ourselves. Of course, multiple write locks are incompatible.
207 Note that we should always try to release a lock, even if we have
208 a network problem sending the release command through, since often
209 a lock is released on a close call, when the user can't retry anyway.
211 After we remove it from our structure, the lock will no longer be
212 kept alive, and the server should time it out within a few minutes.
214 94.04.13 add "force" parameter. If a child explicitly unlocks a
215 file, I guess we'll permit it. however, we don't want simple,
216 innocent closes by children to unlock files in the parent process.
218 /* clid - nonzero on sgi sunos osf1 only */
220 HandleFlock(register struct vcache *avc, int acom, struct vrequest *areq,
221 pid_t clid, int onlymine)
224 struct SimpleLocks *slp, *tlp, **slpp;
226 struct AFSVolSync tsync;
228 struct AFS_FLOCK flock;
229 XSTATS_DECLS AFS_STATCNT(HandleFlock);
230 code = 0; /* default when we don't make any network calls */
231 lockIdSet(&flock, NULL, clid);
233 #if defined(AFS_SGI_ENV)
234 osi_Assert(valusema(&avc->vc_rwlock) <= 0);
235 osi_Assert(OSI_GET_LOCKID() == avc->vc_rwlockid);
237 ObtainWriteLock(&avc->lock, 118);
/* ---- Unlock path ---- */
238 if (acom & LOCK_UN) {
243 /* If the lock is held exclusive, then only the owning process
244 * or a child can unlock it. Use pid and ppid because they are
245 * unique identifiers.
247 if ((avc->flockCount < 0) && (getpid() != avc->ownslock)) {
249 if (onlymine || (getppid() != avc->ownslock)) {
251 if (onlymine || (u.u_procp->p_ppid != avc->ownslock)) {
253 ReleaseWriteLock(&avc->lock);
258 if (lockIdcmp2(&flock, avc, NULL, onlymine, clid)) {
259 ReleaseWriteLock(&avc->lock);
/* Nothing locked locally: unlock is a no-op. */
265 if (avc->flockCount == 0) {
266 ReleaseWriteLock(&avc->lock);
270 /* unlock the lock */
271 if (avc->flockCount > 0) {
273 for (slp = *slpp; slp;) {
274 if (!lockIdcmp2(&flock, avc, slp, onlymine, clid)) {
276 tlp = *slpp = slp->next;
277 osi_FreeSmallSpace(slp);
284 } else if (avc->flockCount == -1) {
285 afs_StoreAllSegments(avc, areq, AFS_ASYNC); /* fsync file early */
287 /* And remove the (only) exclusive lock entry from the list... */
288 osi_FreeSmallSpace(avc->slocks);
/* Last local lock gone: tell the server to release its lock. */
291 if (avc->flockCount == 0) {
293 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
295 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_RELEASELOCK);
297 code = RXAFS_ReleaseLock(tc->id, (struct AFSFid *)
298 &avc->fid.Fid, &tsync);
304 (tc, code, &avc->fid, areq,
305 AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK, NULL));
/* ---- Lock path: loop so we can sleep and retry on contention ---- */
308 while (1) { /* set a new lock */
310 * Upgrading from shared locks to Exclusive and vice versa
311 * is a bit tricky and we don't really support it yet. But
312 * we try to support the common used one which is upgrade
313 * a shared lock to an exclusive for the same process...
315 if ((avc->flockCount > 0 && (acom & LOCK_EX))
316 || (avc->flockCount == -1 && (acom & LOCK_SH))) {
318 * Upgrading from shared locks to an exclusive one:
319 * For now if all the shared locks belong to the
320 * same process then we unlock them on the server
321 * and proceed with the upgrade. Unless we change the
322 * server's locking interface impl we prohibit from
323 * unlocking other processes's shared locks...
324 * Upgrading from an exclusive lock to a shared one:
325 * Again only allowed to be done by the same process.
328 for (slp = *slpp; slp;) {
330 (&flock, avc, slp, 1 /*!onlymine */ , clid)) {
335 tlp = *slpp = slp->next;
336 osi_FreeSmallSpace(slp);
344 if (!code && avc->flockCount == 0) {
346 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
349 (AFS_STATS_FS_RPCIDX_RELEASELOCK);
352 RXAFS_ReleaseLock(tc->id,
353 (struct AFSFid *)&avc->fid.
360 (tc, code, &avc->fid, areq,
361 AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK,
364 } else if (avc->flockCount == -1 && (acom & LOCK_EX)) {
365 if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
371 /* compatible here, decide if needs to go to file server. If
372 * we've already got the file locked (and thus read-locked, since
373 * we've already checked for compatibility), we shouldn't send
374 * the call through to the server again */
375 if (avc->flockCount == 0) {
376 /* we're the first on our block, send the call through */
377 lockType = ((acom & LOCK_EX) ? LockWrite : LockRead);
379 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
381 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_SETLOCK);
383 code = RXAFS_SetLock(tc->id, (struct AFSFid *)
384 &avc->fid.Fid, lockType,
391 (tc, code, &avc->fid, areq,
392 AFS_STATS_FS_RPCIDX_SETLOCK, SHARED_LOCK,
395 code = 0; /* otherwise, pretend things worked */
/* Server granted (or local-only): record the lock in avc->slocks. */
398 slp = (struct SimpleLocks *)
399 osi_AllocSmallSpace(sizeof(struct SimpleLocks));
400 if (acom & LOCK_EX) {
405 /* Record unique id of process owning exclusive lock. */
406 avc->ownslock = getpid();
409 slp->type = LockWrite;
412 avc->flockCount = -1;
414 slp->type = LockRead;
415 slp->next = avc->slocks;
420 lockIdSet(&flock, slp, clid);
423 /* now, if we got EWOULDBLOCK, and we're supposed to wait, we do */
424 if (((code == EWOULDBLOCK) || (code == EAGAIN))
425 && !(acom & LOCK_NB)) {
426 /* sleep for a second, allowing interrupts */
427 ReleaseWriteLock(&avc->lock);
428 #if defined(AFS_SGI_ENV)
429 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
431 code = afs_osi_Wait(1000, NULL, 1);
432 #if defined(AFS_SGI_ENV)
433 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
435 ObtainWriteLock(&avc->lock, 120);
437 code = EINTR; /* return this if ^C typed */
444 ReleaseWriteLock(&avc->lock);
445 code = afs_CheckCode(code, areq, 1); /* defeat a buggy AIX optimization */
/*
 * DoLockWarning -- print the "byte-range lock ignored" console message,
 * rate-limited to at most once every 120 seconds via lastWarnTime.
 * NOTE(review): the line assigning `now` (presumably now = osi_Time())
 * is missing from this extract; in the full source it must precede the
 * comparison below, otherwise `now` would be read uninitialized.
 */
450 /* warn a user that a lock has been ignored */
451 afs_int32 lastWarnTime = 0; /* this is used elsewhere */
455 register afs_int32 now;
458 AFS_STATCNT(DoLockWarning);
459 /* check if we've already warned someone recently */
460 if (now < lastWarnTime + 120)
463 /* otherwise, it is time to nag the user */
466 ("afs: byte-range lock/unlock ignored; make sure no one else is running this program.\n");
/*
 * afs_lockctl -- vnode-layer entry point for fcntl()-style record
 * locking (F_GETLK/F_SETLK/F_SETLKW and the remote F_R* variants).
 * The three alternative signatures below are selected by platform
 * #ifdefs.  AFS only supports whole-file locks: any request with a
 * non-zero whence/start/len is handled specially (the "byte range
 * locks always succeed" hack), otherwise the request is converted to
 * LOCK_SH/LOCK_EX/LOCK_UN and forwarded to HandleFlock(), or to
 * HandleGetLock() for the F_GETLK family.
 * NOTE(review): fragmented extract -- return statements, braces, and
 * #else/#endif lines are missing; every afs_PutFakeStat() call visible
 * here precedes a (missing) early return.
 */
471 afs_lockctl(struct vcache * avc, struct eflock * af, int flag,
472 struct AFS_UCRED * acred, pid_t clid, off_t offset)
473 #elif defined(AFS_SGI_ENV) || (defined(AFS_SUN_ENV) && !defined(AFS_SUN5_ENV)) || defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
474 afs_lockctl(struct vcache * avc, struct AFS_FLOCK * af, int acmd,
475 struct AFS_UCRED * acred, pid_t clid)
478 afs_lockctl(struct vcache * avc, struct AFS_FLOCK * af, int acmd,
479 struct AFS_UCRED * acred)
482 struct vrequest treq;
487 struct afs_fakestat_state fakestate;
489 AFS_STATCNT(afs_lockctl);
490 if ((code = afs_InitReq(&treq, acred)))
492 afs_InitFakeStat(&fakestate);
493 code = afs_EvalFakeStat(&avc, &fakestate, &treq);
495 afs_PutFakeStat(&fakestate);
499 if (flag & VNOFLCK) {
500 afs_PutFakeStat(&fakestate);
503 if (flag & CLNFLCK) {
505 } else if ((flag & GETFLCK) || (flag & RGETFLCK)) {
507 } else if ((flag & SETFLCK) || (flag & RSETFLCK)) {
/* ---- F_GETLK family: query, do not modify lock state ---- */
511 #if (defined(AFS_SUN_ENV) || defined(AFS_SGI_ENV) || defined(AFS_SUN5_ENV)) && !defined(AFS_SUN58_ENV)
512 if ((acmd == F_GETLK) || (acmd == F_RGETLK)) {
514 if (acmd == F_GETLK) {
516 if (af->l_type == F_UNLCK) {
517 afs_PutFakeStat(&fakestate);
520 #ifndef AFS_OSF_ENV /* getlock is a no-op for osf (for now) */
521 code = HandleGetLock(avc, af, &treq, clid);
523 code = afs_CheckCode(code, &treq, 2); /* defeat buggy AIX optimz */
524 afs_PutFakeStat(&fakestate);
/* ---- F_SETLK family: convert to whole-file flock and apply ---- */
526 } else if ((acmd == F_SETLK) || (acmd == F_SETLKW)
527 #if (defined(AFS_SUN_ENV) || defined(AFS_SGI_ENV) || defined(AFS_SUN5_ENV)) && !defined(AFS_SUN58_ENV)
528 || (acmd == F_RSETLK) || (acmd == F_RSETLKW)) {
532 /* this next check is safer when left out, but more applications work
533 * with it in. However, they fail in race conditions. The question is
534 * what to do for people who don't have source to their application;
535 * this way at least, they can get work done */
/* Normalize the several platform-specific "to EOF" encodings to 0. */
536 #ifdef AFS_LINUX24_ENV
537 if (af->l_len == OFFSET_MAX)
538 af->l_len = 0; /* since some systems indicate it as EOF */
540 if (af->l_len == 0x7fffffff)
541 af->l_len = 0; /* since some systems indicate it as EOF */
542 #ifdef AFS_LINUX_64BIT_KERNEL
543 if (af->l_len == LONG_MAX)
544 af->l_len = 0; /* since some systems indicate it as EOF */
547 /* next line makes byte range locks always succeed,
548 * even when they should block */
549 if (af->l_whence != 0 || af->l_start != 0 || af->l_len != 0) {
551 afs_PutFakeStat(&fakestate);
554 /* otherwise we can turn this into a whole-file flock */
555 if (af->l_type == F_RDLCK)
557 else if (af->l_type == F_WRLCK)
559 else if (af->l_type == F_UNLCK)
562 afs_PutFakeStat(&fakestate);
563 return EINVAL; /* unknown lock type */
/* F_SETLK (non-waiting) maps to a non-blocking flock attempt. */
565 if (((acmd == F_SETLK)
566 #if (defined(AFS_SGI_ENV) || defined(AFS_SUN_ENV)) && !defined(AFS_SUN58_ENV)
567 || (acmd == F_RSETLK)
569 ) && code != LOCK_UN)
570 code |= LOCK_NB; /* non-blocking, s.v.p. */
571 #if (defined(AFS_SUN_ENV) && !defined(AFS_SUN5_ENV)) || defined(AFS_OSF_ENV)
572 code = HandleFlock(avc, code, &treq, clid, 0 /*!onlymine */ );
573 #elif defined(AFS_SGI_ENV)
574 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
575 code = HandleFlock(avc, code, &treq, clid, 0 /*!onlymine */ );
576 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
578 code = HandleFlock(avc, code, &treq, 0, 0 /*!onlymine */ );
580 code = afs_CheckCode(code, &treq, 3); /* defeat AIX -O bug */
581 afs_PutFakeStat(&fakestate);
584 afs_PutFakeStat(&fakestate);
/*
 * HandleGetLock -- implement F_GETLK: describe the first lock that
 * would block the request in *af, or set af->l_type = F_UNLCK if the
 * request would succeed.  Consults local state (avc->flockCount and
 * the avc->slocks chain) first, and falls back to GetFlockCount() RPCs
 * when the answer cannot be determined locally.
 * NOTE(review): the two near-identical "want a write lock" sections
 * below (original lines ~663-705 and ~714-755) are presumably the two
 * arms of a platform #ifdef whose directive lines are missing from
 * this extract -- confirm against the full source.
 */
590 * Get a description of the first lock which would
591 * block the lock specified. If the specified lock
592 * would succeed, fill in the lock structure with 'F_UNLCK'.
594 * To do that, we have to ask the server for the lock
596 * 1. The file is not locked by this machine.
597 * 2. Asking for write lock, and only the current
598 * PID has the file read locked.
600 #ifndef AFS_OSF_ENV /* getlock is a no-op for osf (for now) */
602 HandleGetLock(register struct vcache *avc, register struct AFS_FLOCK *af,
603 register struct vrequest *areq, int clid)
605 register afs_int32 code;
606 struct AFS_FLOCK flock;
608 lockIdSet(&flock, NULL, clid);
610 ObtainWriteLock(&avc->lock, 122);
/* No local locks: ask the server for its count. */
611 if (avc->flockCount == 0) {
613 * We don't know ourselves, so ask the server. Unfortunately, we
614 * don't know the pid. Not even the server knows the pid. Besides,
615 * the process with the lock is on another machine
617 code = GetFlockCount(avc, areq);
618 if (code == 0 || (af->l_type == F_RDLCK && code > 0)) {
619 af->l_type = F_UNLCK;
623 af->l_type = F_RDLCK;
625 af->l_type = F_WRLCK;
628 #if defined(AFS_HAVE_FLOCK_SYSID)
/* Read-lock request against known local state. */
634 if (af->l_type == F_RDLCK) {
636 * We want a read lock. If there are only
637 * read locks, or we are the one with the
638 * write lock, say it is unlocked.
640 if (avc->flockCount > 0 || /* only read locks */
641 !lockIdcmp2(&flock, avc, NULL, 1, clid)) {
642 af->l_type = F_UNLCK;
646 /* one write lock, but who? */
647 af->l_type = F_WRLCK; /* not us, so lock would block */
648 if (avc->slocks) { /* we know who, so tell */
649 af->l_pid = avc->slocks->pid;
650 #if defined(AFS_HAVE_FLOCK_SYSID)
651 af->l_sysid = avc->slocks->sysid;
654 af->l_pid = 0; /* XXX can't happen?? */
655 #if defined(AFS_HAVE_FLOCK_SYSID)
/* Write-lock request, first variant (see NOTE above). */
663 * Ok, we want a write lock. If there is a write lock
664 * already, and it is not this process, we fail.
666 if (avc->flockCount < 0) {
667 if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
668 af->l_type = F_WRLCK;
670 af->l_pid = avc->slocks->pid;
671 #if defined(AFS_HAVE_FLOCK_SYSID)
672 af->l_sysid = avc->slocks->sysid;
675 af->l_pid = 0; /* XXX can't happen?? */
676 #if defined(AFS_HAVE_FLOCK_SYSID)
682 /* we are the one with the write lock */
683 af->l_type = F_UNLCK;
688 * Want a write lock, and we know there are read locks.
689 * If there is more than one, or it isn't us, we cannot lock.
691 if ((avc->flockCount > 1)
692 || lockIdcmp2(&flock, avc, NULL, 1, clid)) {
693 struct SimpleLocks *slp;
695 af->l_type = F_RDLCK;
697 #if defined(AFS_HAVE_FLOCK_SYSID)
700 /* find a pid that isn't our own */
701 for (slp = avc->slocks; slp; slp = slp->next) {
702 if (lockIdcmp2(&flock, NULL, slp, 1, clid)) {
703 af->l_pid = slp->pid;
704 #if defined(AFS_HAVE_FLOCK_SYSID)
705 af->l_sysid = avc->slocks->sysid;
/* Write-lock request, second variant (see NOTE above). */
714 * Ok, we want a write lock. If there is a write lock
715 * already, and it is not this process, we fail.
717 if (avc->flockCount < 0) {
718 if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
719 af->l_type = F_WRLCK;
721 af->l_pid = avc->slocks->pid;
722 #if defined(AFS_HAVE_FLOCK_SYSID)
723 af->l_sysid = avc->slocks->sysid;
726 af->l_pid = 0; /* XXX can't happen?? */
727 #if defined(AFS_HAVE_FLOCK_SYSID)
733 /* we are the one with the write lock */
734 af->l_type = F_UNLCK;
739 * Want a write lock, and we know there are read locks.
740 * If there is more than one, or it isn't us, we cannot lock.
742 if ((avc->flockCount > 1)
743 || lockIdcmp2(&flock, avc, NULL, 1, clid)) {
744 struct SimpleLocks *slp;
745 af->l_type = F_RDLCK;
747 #if defined(AFS_HAVE_FLOCK_SYSID)
750 /* find a pid that isn't our own */
751 for (slp = avc->slocks; slp; slp = slp->next) {
752 if (lockIdcmp2(&flock, NULL, slp, 1, clid)) {
753 af->l_pid = slp->pid;
754 #if defined(AFS_HAVE_FLOCK_SYSID)
755 af->l_sysid = avc->slocks->sysid;
/* We hold the only read lock; server may know about other clients. */
764 * Want a write lock, and there is just one read lock, and it
765 * is this process with a read lock. Ask the server if there
766 * are any more processes with the file locked.
768 code = GetFlockCount(avc, areq);
769 if (code == 0 || code == 1) {
770 af->l_type = F_UNLCK;
774 af->l_type = F_RDLCK;
776 af->l_type = F_WRLCK;
778 #if defined(AFS_HAVE_FLOCK_SYSID)
785 af->l_len = 0; /* to end of file */
788 ReleaseWriteLock(&avc->lock);
/*
 * GetFlockCount -- fetch the server's lock count for this file via an
 * RXAFS_FetchStatus RPC and return OutStatus.lockCount.  On any RPC
 * failure, returns 0 ("unlocked") -- see the original comment below
 * for why lying here is safe.  The request is made non-blocking for
 * the duration of the call; `temp` preserves the caller's original
 * O_NONBLOCK bit so it can be restored afterward (NOTE(review): the
 * restoring line using `temp` is not visible in this extract).
 */
792 /* Get the 'flock' count from the server. This comes back in a 'spare'
793 * field from a GetStatus RPC. If we have any problems with the RPC,
794 * we lie and say the file is unlocked. If we ask any 'old' fileservers,
795 * the spare field will be a zero, saying the file is unlocked. This is
796 * OK, as a further 'lock' request will do the right thing.
799 GetFlockCount(struct vcache *avc, struct vrequest *areq)
801 register struct conn *tc;
802 register afs_int32 code;
803 struct AFSFetchStatus OutStatus;
804 struct AFSCallBack CallBack;
805 struct AFSVolSync tsync;
807 XSTATS_DECLS temp = areq->flags & O_NONBLOCK;
808 areq->flags |= O_NONBLOCK;
811 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
813 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
816 RXAFS_FetchStatus(tc->id, (struct AFSFid *)&avc->fid.Fid,
817 &OutStatus, &CallBack, &tsync);
823 (tc, code, &avc->fid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
827 areq->flags &= ~O_NONBLOCK;
830 return (0); /* failed, say it is 'unlocked' */
832 return ((int)OutStatus.lockCount);
/*
 * afs_xflock -- flock(2) system-call handler for BSD-derived platforms
 * (the surrounding #if excludes System V systems, where flock is not
 * supported).  Looks up the file descriptor, verifies it refers to an
 * AFS vnode, releases any lock the fd already holds (relock path),
 * forwards the requested operation to HandleFlock(), and maintains the
 * fd's FSHLOCK/FEXLOCK flag bits.  Non-AFS vnodes fall through to the
 * native flock() (OSF path visible below).
 * NOTE(review): fragmented extract -- the struct a definition, local
 * declarations (fd, uap, tvc, code, flockDone), gotos/returns and many
 * braces are missing from this view; treat the visible structure as
 * indicative only.
 */
838 #if !defined(AFS_AIX_ENV) && !defined(AFS_HPUX_ENV) && !defined(AFS_SUN5_ENV) && !defined(AFS_SGI_ENV) && !defined(UKERNEL) && !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN_ENV) && !defined(AFS_XBSD_ENV)
839 /* Flock not support on System V systems */
841 extern struct fileops afs_fileops;
844 afs_xflock(struct proc *p, void *args, int *retval)
845 #else /* AFS_OSF_ENV */
856 struct vrequest treq;
859 struct afs_fakestat_state fakestate;
861 afs_InitFakeStat(&fakestate);
862 AFS_STATCNT(afs_xflock);
865 uap = (struct a *)args;
866 getf(&fd, uap->fd, FILE_FLAGS_NULL, &u.u_file_state);
867 #else /* AFS_OSF_ENV */
868 uap = (struct a *)u.u_ap;
872 afs_PutFakeStat(&fakestate);
876 if (flockDone = afs_InitReq(&treq, u.u_cred)) {
877 afs_PutFakeStat(&fakestate);
880 /* first determine whether this is any sort of vnode */
881 if (fd->f_type == DTYPE_VNODE) {
882 /* good, this is a vnode; next see if it is an AFS vnode */
883 tvc = VTOAFS(fd->f_data); /* valid, given a vnode */
884 if (IsAfsVnode(AFSTOV(tvc))) {
885 /* This is an AFS vnode, so do the work */
887 /* find real vcache entry; shouldn't be null if gnode ref count
890 tvc = VTOAFS(afs_gntovn) (tvc);
893 afs_PutFakeStat(&fakestate);
897 code = afs_EvalFakeStat(&tvc, &fakestate, &treq);
899 afs_PutFakeStat(&fakestate);
902 if ((fd->f_flag & (FEXLOCK | FSHLOCK)) && !(uap->com & LOCK_UN)) {
903 /* First, if fd already has lock, release it for relock path */
904 #if defined(AFS_SGI_ENV) || defined(AFS_OSF_ENV) || (defined(AFS_SUN_ENV) && !defined(AFS_SUN5_ENV))
905 HandleFlock(tvc, LOCK_UN, &treq, u.u_procp->p_pid,
908 HandleFlock(tvc, LOCK_UN, &treq, 0, 0 /*!onlymine */ );
910 fd->f_flag &= ~(FEXLOCK | FSHLOCK);
912 /* now try the requested operation */
914 #if defined(AFS_SGI_ENV) || defined(AFS_OSF_ENV) || (defined(AFS_SUN_ENV) && !defined(AFS_SUN5_ENV))
916 HandleFlock(tvc, uap->com, &treq, u.u_procp->p_pid,
919 code = HandleFlock(tvc, uap->com, &treq, 0, 0 /*!onlymine */ );
/* Update the fd's lock flag bits to reflect the result. */
925 if (uap->com & LOCK_UN) {
927 fd->f_flag &= ~(FEXLOCK | FSHLOCK);
931 #else /* AFS_OSF_ENV */
934 if (uap->com & LOCK_SH)
935 fd->f_flag |= FSHLOCK;
936 else if (uap->com & LOCK_EX)
937 fd->f_flag |= FEXLOCK;
941 fd->f_ops = &afs_fileops;
/* Not an AFS vnode: hand off to the native flock implementation. */
946 code = flock(p, args, retval);
952 afs_PutFakeStat(&fakestate);
954 #else /* AFS_OSF_ENV */
957 (*afs_longcall_procs.LC_flock) ();
961 afs_PutFakeStat(&fakestate);
965 #endif /* !defined(AFS_AIX_ENV) && !defined(AFS_HPUX_ENV) && !defined(AFS_SUN5_ENV) && !defined(UKERNEL) && !defined(AFS_LINUX20_ENV) */