2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
15 #include "../afs/param.h" /* Should be always first */
16 #include "../afs/sysincludes.h" /* Standard vendor system headers */
17 #include "../afs/afsincludes.h" /* Afs-based standard headers */
18 #include "../afs/afs_stats.h" /* statistics */
19 #include "../afs/afs_cbqueue.h"
20 #include "../afs/nfsclient.h"
21 #include "../afs/afs_osidnlc.h"
23 #if defined(AFS_HPUX102_ENV)
24 #define AFS_FLOCK k_flock
26 #if defined(AFS_SUN56_ENV)
27 #define AFS_FLOCK flock64
29 #define AFS_FLOCK flock
30 #endif /* AFS_SUN65_ENV */
31 #endif /* AFS_HPUX102_ENV */
33 static int GetFlockCount(struct vcache *avc, struct vrequest *areq);
/*
 * lockIdSet -- stamp the identity of the *current* process (pid and, on
 * platforms that have one, sysid) into a lock record.
 *
 * If slp is non-NULL, the SimpleLocks entry is stamped; otherwise the
 * AFS_FLOCK structure is stamped.  clid is the caller-supplied lock id,
 * meaningful only on the platforms noted below.  K&R-style definition.
 *
 * NOTE(review): this extract is elided -- braces and several #else/#endif
 * lines of the original are not visible here; comments below describe only
 * what the visible lines establish.
 */
35 void lockIdSet(flock, slp, clid)
36 int clid; /* non-zero on SGI, OSF, SunOS */
37 struct SimpleLocks *slp;
38 struct AFS_FLOCK *flock;
/* Pick up the current process descriptor in a platform-specific way. */
40 #if defined(AFS_SUN5_ENV)
41 register proc_t *procp = ttoproc(curthread);
43 #if !defined(AFS_AIX41_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_SGI65_ENV)
45 struct proc *procp = OSI_GET_CURRENT_PROCP();
47 struct proc *procp = u.u_procp;
48 #endif /* AFS_SGI_ENV */
/* IRIX 6.5: fetch the flock-id (pid + sysid pair) from the kernel. */
51 #if defined(AFS_SGI65_ENV)
53 get_current_flid(&flid);
/* --- slp != NULL branch: stamp the SimpleLocks entry --- */
62 slp->sysid = u.u_sysid;
66 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV)
69 slp->pid = procp->p_pid;
71 slp->sysid = procp->p_sysid;
72 slp->pid = procp->p_epid;
75 #if defined(AFS_SGI_ENV)
77 slp->sysid = flid.fl_sysid;
79 slp->sysid = OSI_GET_CURRENT_SYSID();
83 #if defined(AFS_SUN_ENV) || defined(AFS_OSF_ENV)
86 #if defined(AFS_LINUX20_ENV) || defined(AFS_HPUX_ENV)
89 slp->pid = u.u_procp->p_pid;
92 #endif /* AFS_AIX_ENV */
93 #endif /* AFS_AIX32_ENV */
/* --- slp == NULL branch: stamp the AFS_FLOCK structure instead --- */
96 #if defined(AFS_AIX32_ENV)
99 flock->l_pid = getpid();
101 flock->l_sysid = u.u_sysid;
102 flock->l_pid = u.u_epid;
105 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV)
108 flock->l_pid = procp->p_pid;
110 flock->l_sysid = procp->p_sysid;
111 flock->l_pid = procp->p_epid;
114 #if defined(AFS_SGI_ENV)
116 flock->l_sysid = flid.fl_sysid;
118 flock->l_sysid = OSI_GET_CURRENT_SYSID();
122 #if defined(AFS_SUN_ENV) || defined(AFS_OSF_ENV)
125 #if defined(AFS_LINUX20_ENV) || defined(AFS_HPUX_ENV)
126 flock->l_pid = getpid();
128 flock->l_pid = u.u_procp->p_pid;
132 #endif /* AFS_AIX_ENV */
133 #endif /* AFS_AIX32_ENV */
137 /* return 1 (true) if specified flock does not match alp (if
138 * specified), or any of the slp structs (if alp == 0)
140 /* I'm not sure that the comparison of flock->pid to p_ppid
141 * is correct. Should that be a comparison of alp (or slp) ->pid
142 * to p_ppid? Especially in the context of the lower loop, where
143 * the repeated comparison doesn't make much sense...
/*
 * lockIdcmp2 -- decide whether the lock identified by flock1 matches an
 * existing lock holder.
 *
 * If alp is non-NULL, only that SimpleLocks entry is compared; otherwise
 * (visible in the lower loop) every entry on vp->slocks is examined.
 * onlymine restricts matching to locks held by the calling process itself
 * (parent-pid matches are then skipped).  clid is used only on IRIX 6.5.
 *
 * Returns 1 ("failure": no match / mismatch) per the visible return at the
 * end; the matching-success paths are in lines elided from this extract.
 *
 * NOTE(review): elided extract -- braces, #else lines, and the early
 * returns are not visible; do not infer the full control flow from here.
 */
145 static int lockIdcmp2(flock1, vp, alp, onlymine, clid)
146 struct AFS_FLOCK *flock1;
148 register struct SimpleLocks *alp;
149 int onlymine; /* don't match any locks which are held by my */
151 int clid; /* Only Irix 6.5 for now. */
153 register struct SimpleLocks *slp;
/* Per-platform current-process handle, needed for the p_ppid compare. */
154 #if defined(AFS_SUN5_ENV)
155 register proc_t *procp = ttoproc(curthread);
157 #if !defined(AFS_AIX41_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_SGI65_ENV)
159 struct proc *procp = curprocp;
160 #else /* AFS_SGI64_ENV */
161 struct proc *procp = u.u_procp;
162 #endif /* AFS_SGI64_ENV */
/* --- single-entry compare (alp != NULL) --- */
/* On multi-system platforms a differing sysid can never match. */
168 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
169 if (flock1->l_sysid != alp->sysid) {
/* Match on our own pid, or (unless onlymine) on our parent's pid. */
173 if ((flock1->l_pid == alp->pid) ||
174 #if defined(AFS_AIX41_ENV) || defined(AFS_LINUX20_ENV) || defined(AFS_HPUX_ENV)
175 (!onlymine && (flock1->l_pid == getppid()))
177 #if defined(AFS_SGI65_ENV)
178 (!onlymine && (flock1->l_pid == clid))
180 (!onlymine && (flock1->l_pid == procp->p_ppid))
/* --- whole-list compare (alp == NULL): walk every simple lock --- */
189 for (slp = vp->slocks; slp; slp = slp->next) {
190 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
191 if (flock1->l_sysid != slp->sysid) {
195 if (flock1->l_pid == slp->pid) {
199 return (1); /* failure */
203 /* we don't send multiple read flocks to the server, but rather just count
204 them up ourselves. Of course, multiple write locks are incompatible.
206 Note that we should always try to release a lock, even if we have
207 a network problem sending the release command through, since often
208 a lock is released on a close call, when the user can't retry anyway.
210 After we remove it from our structure, the lock will no longer be
211 kept alive, and the server should time it out within a few minutes.
213 94.04.13 add "force" parameter. If a child explicitly unlocks a
214 file, I guess we'll permit it. However, we don't want simple,
215 innocent closes by children to unlock files in the parent process.
/*
 * HandleFlock -- apply a whole-file flock operation (acom: LOCK_SH /
 * LOCK_EX / LOCK_UN, optionally LOCK_NB) to the vcache avc on behalf of
 * the caller identified by clid (meaningful on SGI/SunOS/OSF1 only).
 * onlymine restricts unlock matching to locks owned by the caller itself.
 *
 * Read locks are counted locally (avc->flockCount > 0) and only the first
 * one is sent to the file server; an exclusive lock is flockCount == -1.
 * Returns an errno-style code filtered through afs_CheckCode().
 *
 * NOTE(review): elided extract -- braces, #else arms, rx-lock calls around
 * the RPCs, and loop closers are not visible; comments describe visible
 * lines only.
 */
217 HandleFlock(avc, acom, areq, clid, onlymine)
218 pid_t clid; /* non-zero on SGI, SunOS, OSF1 only */
219 register struct vcache *avc;
220 struct vrequest *areq;
224 struct SimpleLocks *slp, *tlp, **slpp;
226 struct AFSVolSync tsync;
228 struct AFS_FLOCK flock;
231 AFS_STATCNT(HandleFlock);
232 code = 0; /* default when we don't make any network calls */
/* Stamp our identity into 'flock' for later ownership comparisons. */
233 lockIdSet(&flock, (struct SimpleLocks *)0, clid);
/* SGI: caller must already hold the vnode rwlock exclusively. */
235 #if defined(AFS_SGI_ENV)
236 osi_Assert(valusema(&avc->vc_rwlock) <= 0);
237 osi_Assert(OSI_GET_LOCKID() == avc->vc_rwlockid);
239 ObtainWriteLock(&avc->lock,118);
/* ---------------- unlock path ---------------- */
240 if (acom & LOCK_UN) {
245 /* If the lock is held exclusive, then only the owning process
246 * or a child can unlock it. Use pid and ppid because they are
247 * unique identifiers.
249 if ((avc->flockCount < 0) && (getpid() != avc->ownslock)) {
251 if (onlymine || (getppid() != avc->ownslock)) {
253 if (onlymine || (u.u_procp->p_ppid != avc->ownslock)) {
255 ReleaseWriteLock(&avc->lock);
/* Nonzero lockIdcmp2 => no lock of ours to release; bail out. */
260 if (lockIdcmp2(&flock, avc, (struct SimpleLocks *)0, onlymine, clid)) {
261 ReleaseWriteLock(&avc->lock);
267 if (avc->flockCount == 0) {
268 ReleaseWriteLock(&avc->lock);
272 /* unlock the lock */
273 if (avc->flockCount > 0) {
/* Shared locks: unchain and free every entry that matches us. */
275 for (slp = *slpp; slp;) {
276 if (!lockIdcmp2(&flock, avc, slp, onlymine, clid)) {
278 tlp = *slpp = slp->next;
279 osi_FreeSmallSpace(slp);
287 else if (avc->flockCount == -1) {
288 afs_StoreAllSegments(avc, areq, AFS_ASYNC); /* fsync file early */
290 /* And remove the (only) exclusive lock entry from the list... */
291 osi_FreeSmallSpace(avc->slocks);
/* Last local lock gone: tell the file server to release its lock. */
294 if (avc->flockCount == 0) {
296 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
298 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_RELEASELOCK);
299 #ifdef RX_ENABLE_LOCKS
301 #endif /* RX_ENABLE_LOCKS */
302 code = RXAFS_ReleaseLock(tc->id, (struct AFSFid *)
303 &avc->fid.Fid, &tsync);
304 #ifdef RX_ENABLE_LOCKS
306 #endif /* RX_ENABLE_LOCKS */
/* afs_Analyze retries with another server/conn as appropriate. */
311 (afs_Analyze(tc, code, &avc->fid, areq,
312 AFS_STATS_FS_RPCIDX_RELEASELOCK,
313 SHARED_LOCK, (struct cell *)0));
/* ---------------- set-lock path ---------------- */
317 while (1) { /* set a new lock */
319 * Upgrading from shared locks to Exclusive and vice versa
320 * is a bit tricky and we don't really support it yet. But
321 * we try to support the commonly used one which is upgrade
322 * a shared lock to an exclusive for the same process...
324 if ((avc->flockCount > 0 && (acom & LOCK_EX)) ||
325 (avc->flockCount == -1 && (acom & LOCK_SH))) {
327 * Upgrading from shared locks to an exclusive one:
328 * For now if all the shared locks belong to the
329 * same process then we unlock them on the server
330 * and proceed with the upgrade. Unless we change the
331 * server's locking interface impl we prohibit from
332 * unlocking other processes' shared locks...
333 * Upgrading from an exclusive lock to a shared one:
334 * Again only allowed to be done by the same process.
337 for (slp = *slpp; slp;) {
338 if (!lockIdcmp2(&flock, avc, slp, 1/*!onlymine*/, clid)) {
343 tlp = *slpp = slp->next;
344 osi_FreeSmallSpace(slp);
/* All our locks dropped locally: drop the server-side lock too. */
352 if (!code && avc->flockCount == 0) {
354 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
356 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_RELEASELOCK);
357 #ifdef RX_ENABLE_LOCKS
359 #endif /* RX_ENABLE_LOCKS */
360 code = RXAFS_ReleaseLock(tc->id,
361 (struct AFSFid *) &avc->fid.Fid,
363 #ifdef RX_ENABLE_LOCKS
365 #endif /* RX_ENABLE_LOCKS */
370 (afs_Analyze(tc, code, &avc->fid, areq,
371 AFS_STATS_FS_RPCIDX_RELEASELOCK,
372 SHARED_LOCK, (struct cell *)0));
/* Re-requesting LOCK_EX while someone (not us) holds the exclusive. */
374 } else if (avc->flockCount == -1 && (acom & LOCK_EX)) {
375 if (lockIdcmp2(&flock, avc, (struct SimpleLocks *)0, 1, clid)) {
381 /* compatible here, decide if needs to go to file server. If
382 we've already got the file locked (and thus read-locked, since
383 we've already checked for compatibility), we shouldn't send
384 the call through to the server again */
385 if (avc->flockCount == 0) {
386 /* we're the first on our block, send the call through */
387 lockType = ((acom & LOCK_EX)? LockWrite : LockRead);
389 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
391 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_SETLOCK);
392 #ifdef RX_ENABLE_LOCKS
394 #endif /* RX_ENABLE_LOCKS */
395 code = RXAFS_SetLock(tc->id, (struct AFSFid *)
396 &avc->fid.Fid, lockType, &tsync);
397 #ifdef RX_ENABLE_LOCKS
399 #endif /* RX_ENABLE_LOCKS */
404 (afs_Analyze(tc, code, &avc->fid, areq,
405 AFS_STATS_FS_RPCIDX_SETLOCK,
406 SHARED_LOCK, (struct cell *)0));
408 else code = 0; /* otherwise, pretend things worked */
/* Record the lock locally so we can count/match it later. */
411 slp = (struct SimpleLocks *) osi_AllocSmallSpace(sizeof(struct SimpleLocks));
412 if (acom & LOCK_EX) {
417 /* Record unique id of process owning exclusive lock. */
418 avc->ownslock = getpid();
421 slp->type = LockWrite;
422 slp->next = (struct SimpleLocks *)0;
424 avc->flockCount = -1;
426 slp->type = LockRead;
427 slp->next = avc->slocks;
432 lockIdSet(&flock, slp, clid);
435 /* now, if we got EWOULDBLOCK, and we're supposed to wait, we do */
436 if(((code == EWOULDBLOCK)||(code == EAGAIN)) && !(acom & LOCK_NB)) {
437 /* sleep for a second, allowing interrupts */
438 ReleaseWriteLock(&avc->lock);
439 #if defined(AFS_SGI_ENV)
440 AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
442 code = afs_osi_Wait(1000, (struct afs_osi_WaitHandle *) 0, 1);
443 #if defined(AFS_SGI_ENV)
444 AFS_RWLOCK((vnode_t *)avc, VRWLOCK_WRITE);
446 ObtainWriteLock(&avc->lock,120);
448 code = EINTR; /* return this if ^C typed */
455 ReleaseWriteLock(&avc->lock);
456 code = afs_CheckCode(code, areq, 1); /* defeat a buggy AIX optimization */
461 /* warn a user that a lock has been ignored */
/* Timestamp of the last warning, used to rate-limit to one per 2 min. */
462 afs_int32 lastWarnTime = 0;
/*
 * DoLockWarning -- nag the user (at most once every 120 seconds) that a
 * byte-range lock/unlock request was ignored by AFS.
 * NOTE(review): elided extract -- the lines fetching 'now' and updating
 * lastWarnTime are not visible here.
 */
463 static void DoLockWarning() {
464 register afs_int32 now;
467 AFS_STATCNT(DoLockWarning);
468 /* check if we've already warned someone recently */
469 if (now < lastWarnTime + 120) return;
471 /* otherwise, it is time to nag the user */
473 afs_warn("afs: byte-range lock/unlock ignored; make sure no one else is running this program.\n");
/*
 * afs_lockctl -- vnode-layer entry point for fcntl-style record locking.
 * The argument list varies per platform (three K&R headers are visible
 * below, selected by #ifdefs).  F_GETLK(-ish) commands are routed to
 * HandleGetLock; F_SETLK/F_SETLKW (and the remote F_RSETLK* variants) are
 * converted to a whole-file flock and routed to HandleFlock.  Byte-range
 * locks other than "whole file" are not really supported (see the
 * always-succeed comment below).
 *
 * NOTE(review): elided extract -- alternate parameter declarations, #else
 * arms, returns, and closing braces are not visible.
 */
478 afs_lockctl(avc, af, flag, acred, clid, offset)
484 #if defined(AFS_SGI_ENV) || (defined(AFS_SUN_ENV) && !defined(AFS_SUN5_ENV))
485 afs_lockctl(avc, af, acmd, acred, clid)
489 afs_lockctl(avc, af, acmd, acred)
491 struct AFS_FLOCK *af;
495 struct AFS_UCRED *acred; {
496 struct vrequest treq;
502 AFS_STATCNT(afs_lockctl);
503 if (code = afs_InitReq(&treq, acred)) return code;
/* Platform-specific flag-based dispatch (VNOFLCK/CLNFLCK/GETFLCK...). */
505 if (flag & VNOFLCK) return 0;
506 if (flag & CLNFLCK) {
508 } else if ((flag & GETFLCK) || (flag & RGETFLCK)) {
510 } else if ((flag & SETFLCK) || (flag & RSETFLCK)) {
/* ---- query path: F_GETLK / F_RGETLK ---- */
514 #if (defined(AFS_SUN_ENV) || defined(AFS_SGI_ENV) || defined(AFS_SUN5_ENV)) && !defined(AFS_SUN58_ENV)
515 if ((acmd == F_GETLK) || (acmd == F_RGETLK)) {
517 if (acmd == F_GETLK) {
519 if (af->l_type == F_UNLCK)
521 #ifndef AFS_OSF_ENV /* getlock is a no-op for osf (for now) */
522 code = HandleGetLock(avc, af, &treq, clid);
524 code = afs_CheckCode(code, &treq, 2); /* defeat buggy AIX optimz */
/* ---- set path: F_SETLK / F_SETLKW (and remote variants) ---- */
527 else if ((acmd == F_SETLK) || (acmd == F_SETLKW)
528 #if (defined(AFS_SUN_ENV) || defined(AFS_SGI_ENV) || defined(AFS_SUN5_ENV)) && !defined(AFS_SUN58_ENV)
529 || (acmd == F_RSETLK)|| (acmd == F_RSETLKW)) {
533 /* this next check is safer when left out, but more applications work
534 with it in. However, they fail in race conditions. The question is
535 what to do for people who don't have source to their application;
536 this way at least, they can get work done */
537 if (af->l_len == 0x7fffffff)
538 af->l_len = 0; /* since some systems indicate it as EOF */
539 /* next line makes byte range locks always succeed,
540 even when they should block */
541 if (af->l_whence != 0 || af->l_start != 0 || af->l_len != 0) {
545 /* otherwise we can turn this into a whole-file flock */
546 if (af->l_type == F_RDLCK) code = LOCK_SH;
547 else if (af->l_type == F_WRLCK) code = LOCK_EX;
548 else if (af->l_type == F_UNLCK) code = LOCK_UN;
549 else return EINVAL; /* unknown lock type */
/* F_SETLK (not F_SETLKW) must not block: add LOCK_NB, except on unlock. */
550 if (((acmd == F_SETLK)
551 #if (defined(AFS_SGI_ENV) || defined(AFS_SUN_ENV)) && !defined(AFS_SUN58_ENV)
552 || (acmd == F_RSETLK)
554 ) && code != LOCK_UN)
555 code |= LOCK_NB; /* non-blocking, s.v.p. */
/* Dispatch to HandleFlock; SGI wraps it in the vnode rwlock. */
556 #if defined(AFS_SUN_ENV) && !defined(AFS_SUN5_ENV) || defined(AFS_OSF_ENV)
557 code = HandleFlock(avc, code, &treq, clid, 0/*!onlymine*/);
559 #if defined(AFS_SGI_ENV)
560 AFS_RWLOCK((vnode_t *)avc, VRWLOCK_WRITE);
561 code = HandleFlock(avc, code, &treq, clid, 0/*!onlymine*/);
562 AFS_RWUNLOCK((vnode_t *)avc, VRWLOCK_WRITE);
564 code = HandleFlock(avc, code, &treq, 0, 0/*!onlymine*/);
567 code = afs_CheckCode(code, &treq, 3); /* defeat AIX -O bug */
575 * Get a description of the first lock which would
576 * block the lock specified. If the specified lock
577 * would succeed, fill in the lock structure with 'F_UNLCK'.
579 * To do that, we have to ask the server for the lock
581 * 1. The file is not locked by this machine.
582 * 2. Asking for write lock, and only the current
583 * PID has the file read locked.
585 #ifndef AFS_OSF_ENV /* getlock is a no-op for osf (for now) */
/*
 * HandleGetLock -- implement F_GETLK: describe the first lock that would
 * block the lock requested in *af, or set af->l_type = F_UNLCK if the
 * request would succeed.  Consults the local SimpleLocks list first and
 * falls back to GetFlockCount() (a server round trip) when this machine
 * holds no locks or cannot decide alone.
 *
 * NOTE(review): elided extract -- two nearly identical write-lock blocks
 * are visible below; in context they are the two arms of a platform
 * #ifdef, not duplicated live code.  Also note lines 690/740 copy
 * avc->slocks->sysid while iterating with slp -- slp->sysid looks like
 * what was intended; confirm against upstream before changing.
 */
586 HandleGetLock(avc, af, areq, clid)
587 int clid; /* not used by some OSes */
588 register struct vcache *avc;
589 register struct vrequest *areq;
590 register struct AFS_FLOCK *af;
592 register afs_int32 code;
593 struct AFS_FLOCK flock;
/* Stamp our own identity for ownership comparisons below. */
595 lockIdSet(&flock, (struct SimpleLocks *)0, clid);
597 ObtainWriteLock(&avc->lock,122);
598 if (avc->flockCount == 0) {
599 /* We don't know ourselves, so ask the server. Unfortunately, we don't know the pid.
600 * Not even the server knows the pid. Besides, the process with the lock is on another machine
602 code = GetFlockCount(avc, areq);
/* count==0 (unlocked) or read-compatible read locks => would succeed */
603 if (code == 0 || (af->l_type == F_RDLCK && code > 0)) {
604 af->l_type = F_UNLCK;
608 af->l_type = F_RDLCK;
610 af->l_type = F_WRLCK;
/* Server can't report a pid/sysid, so these stay unset on this path. */
613 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
/* ---- local locks exist; decide from our own list ---- */
619 if (af->l_type == F_RDLCK) {
621 * We want a read lock. If there are only
622 * read locks, or we are the one with the
623 * write lock, say it is unlocked.
625 if (avc->flockCount > 0 || /* only read locks */
626 !lockIdcmp2(&flock, avc, (struct SimpleLocks *)0, 1, clid)) {
627 af->l_type = F_UNLCK;
631 /* one write lock, but who? */
632 af->l_type = F_WRLCK; /* not us, so lock would block */
633 if (avc->slocks) { /* we know who, so tell */
634 af->l_pid = avc->slocks->pid;
635 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
636 af->l_sysid = avc->slocks->sysid;
639 af->l_pid = 0; /* XXX can't happen?? */
640 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
/* ---- want a write lock (first platform arm) ---- */
648 * Ok, we want a write lock. If there is a write lock
649 * already, and it is not this process, we fail.
651 if (avc->flockCount < 0) {
652 if (lockIdcmp2(&flock, avc, (struct SimpleLocks *)0, 1, clid)) {
653 af->l_type = F_WRLCK;
655 af->l_pid = avc->slocks->pid;
656 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
657 af->l_sysid = avc->slocks->sysid;
660 af->l_pid = 0; /* XXX can't happen?? */
661 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
667 /* we are the one with the write lock */
668 af->l_type = F_UNLCK;
673 * Want a write lock, and we know there are read locks.
674 * If there is more than one, or it isn't us, we cannot lock.
676 if ((avc->flockCount > 1)
677 || lockIdcmp2(&flock, avc, (struct SimpleLocks *)0, 1, clid)) {
678 struct SimpleLocks *slp;
680 af->l_type = F_RDLCK;
682 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
685 /* find a pid that isn't our own */
686 for (slp = avc->slocks; slp; slp = slp->next) {
687 if (lockIdcmp2(&flock, (struct vcache *)0, slp, 1, clid)) {
688 af->l_pid = slp->pid;
689 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
/* NOTE(review): should this be slp->sysid? (see header note) */
690 af->l_sysid = avc->slocks->sysid;
/* ---- want a write lock (second platform arm, parallel logic) ---- */
699 * Ok, we want a write lock. If there is a write lock
700 * already, and it is not this process, we fail.
702 if (avc->flockCount < 0) {
703 if (lockIdcmp2(&flock, avc, (struct SimpleLocks *)0, 1, clid)) {
704 af->l_type = F_WRLCK;
706 af->l_pid = avc->slocks->pid;
707 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
708 af->l_sysid = avc->slocks->sysid;
711 af->l_pid = 0; /* XXX can't happen?? */
712 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
718 /* we are the one with the write lock */
719 af->l_type = F_UNLCK;
724 * Want a write lock, and we know there are read locks.
725 * If there is more than one, or it isn't us, we cannot lock.
727 if ((avc->flockCount > 1)
728 || lockIdcmp2(&flock, avc, (struct SimpleLocks *)0, 1, clid)) {
729 struct SimpleLocks *slp;
730 af->l_type = F_RDLCK;
732 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
735 /* find a pid that isn't our own */
736 for (slp = avc->slocks; slp; slp = slp->next) {
737 if (lockIdcmp2(&flock, (struct vcache *)0, slp, 1, clid)) {
738 af->l_pid = slp->pid;
739 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
/* NOTE(review): should this be slp->sysid? (see header note) */
740 af->l_sysid = avc->slocks->sysid;
749 * Want a write lock, and there is just one read lock, and it
750 * is this process with a read lock. Ask the server if there
751 * are any more processes with the file locked.
753 code = GetFlockCount(avc, areq);
754 if (code == 0 || code == 1) {
755 af->l_type = F_UNLCK;
759 af->l_type = F_RDLCK;
761 af->l_type = F_WRLCK;
763 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
/* Whole-file semantics: report the lock as covering the entire file. */
770 af->l_len = 0; /* to end of file */
773 ReleaseWriteLock(&avc->lock);
777 /* Get the 'flock' count from the server. This comes back in a 'spare'
778 * field from a GetStatus RPC. If we have any problems with the RPC,
779 * we lie and say the file is unlocked. If we ask any 'old' fileservers,
780 * the spare field will be a zero, saying the file is unlocked. This is
781 * OK, as a further 'lock' request will do the right thing.
/*
 * GetFlockCount -- ask the file server how many processes hold this file
 * locked, via the spare2 field of a FetchStatus reply.
 *
 * Returns the server's count, or 0 ("unlocked") on any RPC failure --
 * old fileservers also report 0 here, which is safe because a subsequent
 * lock request will still do the right thing (see the comment block above
 * this function in the original file).
 *
 * The request is temporarily forced non-blocking (O_NONBLOCK in
 * areq->flags) for the duration of the RPC, then the caller's original
 * setting is restored from 'temp'.
 * NOTE(review): elided extract -- the declarations of 'temp'/XSTATS, the
 * retry loop, and the failure test are not visible here.
 */
783 static int GetFlockCount(struct vcache *avc, struct vrequest *areq)
785 register struct conn *tc;
786 register afs_int32 code;
787 struct AFSFetchStatus OutStatus;
788 struct AFSCallBack CallBack;
789 struct AFSVolSync tsync;
/* Save caller's O_NONBLOCK bit, then force non-blocking for the RPC. */
793 temp = areq->flags & O_NONBLOCK;
794 areq->flags |= O_NONBLOCK;
797 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
799 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
800 #ifdef RX_ENABLE_LOCKS
802 #endif /* RX_ENABLE_LOCKS */
803 code = RXAFS_FetchStatus(tc->id, (struct AFSFid *) &avc->fid.Fid,
804 &OutStatus, &CallBack, &tsync);
805 #ifdef RX_ENABLE_LOCKS
807 #endif /* RX_ENABLE_LOCKS */
811 (afs_Analyze(tc, code, &avc->fid, areq,
812 AFS_STATS_FS_RPCIDX_FETCHSTATUS,
813 SHARED_LOCK, (struct cell *)0));
/* Restore the caller's blocking mode before returning. */
816 areq->flags &= ~O_NONBLOCK;
819 return(0); /* failed, say it is 'unlocked' */
/* spare2 carries the lock count on new-enough fileservers. */
821 return((int)OutStatus.spare2);
827 #if !defined(AFS_AIX_ENV) && !defined(AFS_HPUX_ENV) && !defined(AFS_SUN5_ENV) && !defined(AFS_SGI_ENV) && !defined(UKERNEL) && !defined(AFS_LINUX20_ENV)
828 /* Flock not support on System V systems */
830 extern struct fileops afs_fileops;
/*
 * afs_xflock -- flock(2) system-call intercept for BSD-style platforms
 * (the surrounding #if excludes System V systems, where flock is not
 * supported).  Looks up the file descriptor, and if it refers to an AFS
 * vnode, routes the operation through HandleFlock; otherwise falls back
 * to the native flock path.  Also maintains the FEXLOCK/FSHLOCK bits in
 * fd->f_flag to mirror the lock state.
 *
 * NOTE(review): elided extract -- the argument struct 'a', several
 * declarations, the error paths, and closing braces are not visible.
 */
831 afs_xflock (p, args, retval)
836 #else /* AFS_OSF_ENV */
845 struct vrequest treq;
849 AFS_STATCNT(afs_xflock);
/* Fetch the syscall argument block and the file entry (OSF vs u-area). */
852 uap = (struct a *)args;
853 getf(&fd, uap->fd, FILE_FLAGS_NULL, &u.u_file_state);
854 #else /* AFS_OSF_ENV */
855 uap = (struct a *)u.u_ap;
860 if (flockDone = afs_InitReq(&treq, u.u_cred)) return flockDone;
861 /* first determine whether this is any sort of vnode */
862 if (fd->f_type == DTYPE_VNODE) {
863 /* good, this is a vnode; next see if it is an AFS vnode */
864 tvc = (struct vcache *) fd->f_data; /* valid, given a vnode */
865 if (IsAfsVnode((struct vnode *)tvc)) {
866 /* This is an AFS vnode, so do the work */
868 /* find real vcache entry; shouldn't be null if gnode ref count
871 tvc = (struct vcache *) afs_gntovn(tvc);
/* Relock path: drop any lock this fd already holds before re-locking. */
877 if ((fd->f_flag & (FEXLOCK | FSHLOCK)) && !(uap->com & LOCK_UN)) {
878 /* First, if fd already has lock, release it for relock path */
879 #if defined(AFS_SGI_ENV) || defined(AFS_OSF_ENV) || (defined(AFS_SUN_ENV) && !defined(AFS_SUN5_ENV))
880 HandleFlock(tvc, LOCK_UN, &treq, u.u_procp->p_pid, 0/*!onlymine*/);
882 HandleFlock(tvc, LOCK_UN, &treq, 0, 0/*!onlymine*/);
884 fd->f_flag &= ~(FEXLOCK | FSHLOCK);
886 /* now try the requested operation */
888 #if defined(AFS_SGI_ENV) || defined(AFS_OSF_ENV) || (defined(AFS_SUN_ENV) && !defined(AFS_SUN5_ENV))
889 code = HandleFlock(tvc, uap->com, &treq,
890 u.u_procp->p_pid, 0/*!onlymine*/);
892 code = HandleFlock(tvc, uap->com, &treq, 0, 0/*!onlymine*/);
/* Keep the fd's FEXLOCK/FSHLOCK bits in sync with the outcome. */
898 if (uap->com & LOCK_UN) {
900 fd->f_flag &= ~(FEXLOCK | FSHLOCK);
905 #else /* AFS_OSF_ENV */
908 if (uap->com & LOCK_SH) fd->f_flag |= FSHLOCK;
909 else if (uap->com & LOCK_EX) fd->f_flag |= FEXLOCK;
913 fd->f_ops = &afs_fileops;
/* Not an AFS vnode: hand off to the native flock implementation. */
918 code = flock(p, args, retval);
925 #else /* AFS_OSF_ENV */
928 (*afs_longcall_procs.LC_flock)();
935 #endif /* !defined(AFS_AIX_ENV) && !defined(AFS_HPUX_ENV) && !defined(AFS_SUN5_ENV) && !defined(UKERNEL) && !defined(AFS_LINUX20_ENV) */