2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
15 #include <afsconfig.h>
16 #include "afs/param.h"
19 #include "afs/sysincludes.h" /* Standard vendor system headers */
20 #include "afsincludes.h" /* Afs-based standard headers */
21 #include "afs/afs_stats.h" /* statistics */
22 #include "afs/afs_cbqueue.h"
23 #include "afs/nfsclient.h"
24 #include "afs/afs_osidnlc.h"
25 #include "afs/unified_afs.h"
31 /* Static prototypes */
32 static int HandleGetLock(struct vcache *avc,
34 struct vrequest *areq, int clid);
35 static int GetFlockCount(struct vcache *avc, struct vrequest *areq);
36 static int lockIdcmp2(struct AFS_FLOCK *flock1, struct vcache *vp,
37 struct SimpleLocks *alp, int onlymine,
40 /* int clid; * non-zero on SGI, OSF, SunOS, Darwin, xBSD ** XXX ptr type */
/*
 * lockIdSet -- record the identity of the calling process (pid, and on
 * platforms that have one, a sysid) into either the SimpleLocks entry
 * (slp) or the AFS_FLOCK (flock).  One variant per platform family.
 *
 * NOTE(review): interior lines of this fragment (braces and the
 * slp-vs-flock selection branches) are elided, so the paired
 * assignments below belong to different branches in the full source.
 */
#if defined(AFS_SUN5_ENV)
lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
    /* Solaris: derive the owning process from the current thread. */
    proc_t *procp = ttoproc(curthread);
    slp->pid = procp->p_pid;
    flock->l_pid = procp->p_pid;
#elif defined(AFS_SGI_ENV)
lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
# if defined(AFS_SGI65_ENV)
    /* IRIX 6.5: the file-lock identity (flid) supplies the sysid. */
    get_current_flid(&flid);
    afs_proc_t *procp = OSI_GET_CURRENT_PROCP();
    slp->sysid = flid.fl_sysid;
    /* pre-6.5 IRIX path: query the sysid directly */
    slp->sysid = OSI_GET_CURRENT_SYSID();
    flock->l_sysid = flid.fl_sysid;
    flock->l_sysid = OSI_GET_CURRENT_SYSID();
83 #elif defined(AFS_AIX_ENV)
85 lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
87 # if !defined(AFS_AIX32_ENV)
88 afs_proc_t *procp = u.u_procp;
92 # if defined(AFS_AIX41_ENV)
95 # elif defined(AFS_AIX32_ENV)
96 slp->sysid = u.u_sysid;
99 slp->sysid = procp->p_sysid;
100 slp->pid = prcop->p_epid;
103 # if defined(AFS_AIX41_ENV)
105 flock->l_pid = getpid();
106 # elif defined(AFS_AIX32_ENV)
107 flock->l_sysid = u.u_sysid;
108 flock->l_pid = u.u_epid;
110 flock->l_sysid = procp->p_sysid;
111 flock->l_pid = procp->p_epid;
#elif defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
/* Darwin/xBSD: the pid is supplied via clid by the caller (body elided
 * in this fragment). */
lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
#elif defined(AFS_LINUX20_ENV) || defined(AFS_HPUX_ENV)
/* Linux/HP-UX: only a pid is recorded; no sysid on these platforms. */
lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
    flock->l_pid = getpid();
#elif defined(UKERNEL)
/* User-space client: pull the pid out of the emulated user struct. */
lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
    slp->pid = get_user_struct()->u_procp->p_pid;
    flock->l_pid = get_user_struct()->u_procp->p_pid;
/* Default variant: classic u-area access. */
lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
    slp->pid = u.u_procp->p_pid;
    flock->l_pid = u.u_procp->p_pid;
157 /* return 1 (true) if specified flock does not match alp (if
158 * specified), or any of the slp structs (if alp == 0)
/* I'm not sure that the comparison of flock->pid to p_ppid
 * is correct.  Should that be a comparison of alp (or slp) ->pid
162 * to p_ppid? Especially in the context of the lower loop, where
163 * the repeated comparison doesn't make much sense...
165 /* onlymine - don't match any locks which are held by my parent */
166 /* clid - only irix 6.5 */
/*
 * lockIdcmp2 -- return 1 ("no match" / failure) when flock1 does not
 * match alp (if alp != NULL) or any entry on vp->slocks (if alp == NULL);
 * return 0 on a match.  onlymine suppresses the parent-process match;
 * clid is only meaningful on IRIX 6.5 / Darwin / xBSD.
 *
 * NOTE(review): interior lines (returns, #else/#endif) are elided in
 * this fragment; comments describe only what is visible.
 */
lockIdcmp2(struct AFS_FLOCK *flock1, struct vcache *vp,
	   struct SimpleLocks *alp, int onlymine, int clid)
    struct SimpleLocks *slp;
#if defined(AFS_SUN5_ENV)
    proc_t *procp = ttoproc(curthread);
#if !defined(AFS_AIX41_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_SGI65_ENV) && !defined(AFS_DARWIN_ENV) && !defined(AFS_XBSD_ENV)
    afs_proc_t *procp = curprocp;
#elif defined(UKERNEL)
    afs_proc_t *procp = get_user_struct()->u_procp;
    afs_proc_t *procp = u.u_procp;
#endif /* AFS_SGI64_ENV */
    /* Single-entry comparison against alp. */
#if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
    /* On sysid-bearing platforms a sysid mismatch is an immediate miss. */
    if (flock1->l_sysid != alp->sysid) {
    if ((flock1->l_pid == alp->pid) ||
#if defined(AFS_AIX41_ENV) || defined(AFS_LINUX20_ENV) || defined(AFS_HPUX_ENV)
	/* also match locks held by our parent, unless onlymine */
	(!onlymine && (flock1->l_pid == getppid()))
#if defined(AFS_SGI65_ENV) || defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
	/* XXX check this. used to be *only* irix for some reason. */
	(!onlymine && (flock1->l_pid == clid))
	(!onlymine && (flock1->l_pid == procp->p_ppid))
    /* alp == NULL: scan the vcache's whole SimpleLocks list instead. */
    for (slp = vp->slocks; slp; slp = slp->next) {
#if defined(AFS_HAVE_FLOCK_SYSID)
	if (flock1->l_sysid != slp->sysid) {
	if (flock1->l_pid == slp->pid) {
    return (1);			/* failure */
224 /* we don't send multiple read flocks to the server, but rather just count
225 them up ourselves. Of course, multiple write locks are incompatible.
227 Note that we should always try to release a lock, even if we have
228 a network problem sending the release command through, since often
229 a lock is released on a close call, when the user can't retry anyway.
231 After we remove it from our structure, the lock will no longer be
232 kept alive, and the server should time it out within a few minutes.
234 94.04.13 add "force" parameter. If a child explicitly unlocks a
235 file, I guess we'll permit it. however, we don't want simple,
236 innocent closes by children to unlock files in the parent process.
   If called when disconnected support is enabled, the discon_lock must
241 /* clid - nonzero on sgi sunos osf1 only */
/*
 * HandleFlock -- apply or release a whole-file advisory (flock-style)
 * lock on vcache avc.  acom is a LOCK_SH/LOCK_EX/LOCK_UN combination,
 * optionally with LOCK_NB; clid is the client lock id (nonzero on
 * sgi/sunos/osf1 only); onlymine restricts unlocks to locks held by
 * exactly this process (excluding the parent).
 *
 * NOTE(review): many interior lines (braces, declarations, returns,
 * #else/#endif) are elided in this fragment; comments below describe
 * only what is visible.
 */
HandleFlock(struct vcache *avc, int acom, struct vrequest *areq,
	    pid_t clid, int onlymine)
    struct SimpleLocks *slp, *tlp, **slpp;
    struct AFSVolSync tsync;
    struct AFS_FLOCK flock;
    AFS_STATCNT(HandleFlock);
    code = 0;		/* default when we don't make any network calls */
    /* Identify the caller so we can match against existing lock entries. */
    lockIdSet(&flock, NULL, clid);
#if defined(AFS_SGI_ENV)
    /* On SGI the vnode rwlock must already be held by this thread. */
    osi_Assert(valusema(&avc->vc_rwlock) <= 0);
    osi_Assert(OSI_GET_LOCKID() == avc->vc_rwlockid);
    ObtainWriteLock(&avc->lock, 118);
    if (acom & LOCK_UN) {
	int stored_segments = 0;
	/* If the lock is held exclusive, then only the owning process
	 * or a child can unlock it. Use pid and ppid because they are
	 * unique identifiers.
	if ((avc->flockCount < 0) && (getpid() != avc->ownslock)) {
	    if (onlymine || (getppid() != avc->ownslock)) {
	    if (onlymine || (u.u_procp->p_ppid != avc->ownslock)) {
		ReleaseWriteLock(&avc->lock);
	/* Caller does not match the recorded lock owner: nothing to undo. */
	if (lockIdcmp2(&flock, avc, NULL, onlymine, clid)) {
	    ReleaseWriteLock(&avc->lock);
	if (avc->flockCount == 0) {
	    ReleaseWriteLock(&avc->lock);
	/* unlock the lock */
	if (avc->flockCount > 0) {
	    /* Shared locks: unlink and free every entry matching us. */
	    for (slp = *slpp; slp;) {
		if (!lockIdcmp2(&flock, avc, slp, onlymine, clid)) {
		    tlp = *slpp = slp->next;
		    osi_FreeSmallSpace(slp);
	} else if (avc->flockCount == -1) {
	    if (!stored_segments) {
		afs_StoreAllSegments(avc, areq, AFS_SYNC | AFS_VMSYNC);	/* fsync file early */
		/* afs_StoreAllSegments can drop and reacquire the write lock
		 * on avc and GLOCK, so the flocks may be completely different
		 * now. Go back and perform all checks again. */
	    /* And remove the (only) exclusive lock entry from the list... */
	    osi_FreeSmallSpace(avc->slocks);
	/* Last local lock gone: tell the fileserver, if reachable. */
	if (avc->flockCount == 0) {
	    if (!AFS_IS_DISCONNECTED) {
		struct rx_connection *rxconn;
		tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
		XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_RELEASELOCK);
		code = RXAFS_ReleaseLock(rxconn, (struct AFSFid *)
					 &avc->f.fid.Fid, &tsync);
		 (tc, rxconn, code, &avc->f.fid, areq,
		  AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK, NULL));
		/*printf("Network is dooooooowwwwwwwnnnnnnn\n");*/
    while (1) {			/* set a new lock */
	/*
	 * Upgrading from shared locks to Exclusive and vice versa
	 * is a bit tricky and we don't really support it yet. But
	 * we try to support the common used one which is upgrade
	 * a shared lock to an exclusive for the same process...
	if ((avc->flockCount > 0 && (acom & LOCK_EX))
	    || (avc->flockCount == -1 && (acom & LOCK_SH))) {
	    /*
	     * Upgrading from shared locks to an exclusive one:
	     * For now if all the shared locks belong to the
	     * same process then we unlock them on the server
	     * and proceed with the upgrade. Unless we change the
	     * server's locking interface impl we prohibit from
	     * unlocking other processes's shared locks...
	     * Upgrading from an exclusive lock to a shared one:
	     * Again only allowed to be done by the same process.
	    /* Drop every one of our own lock entries first. */
	    for (slp = *slpp; slp;) {
		(&flock, avc, slp, 1 /*!onlymine */ , clid)) {
		    tlp = *slpp = slp->next;
		    osi_FreeSmallSpace(slp);
	    /* All our locks drained: release the server-side lock too. */
	    if (!code && avc->flockCount == 0) {
		if (!AFS_IS_DISCONNECTED) {
		    struct rx_connection *rxconn;
		    tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
			(AFS_STATS_FS_RPCIDX_RELEASELOCK);
			RXAFS_ReleaseLock(rxconn,
					  (struct AFSFid *)&avc->
			 (tc, rxconn, code, &avc->f.fid, areq,
			  AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK,
	} else if (avc->flockCount == -1 && (acom & LOCK_EX)) {
	    if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
	    /* We've just re-grabbed an exclusive lock, so we don't
	     * need to contact the fileserver, and we don't need to
	     * add the lock to avc->slocks (since we already have a
	     * lock there). So, we are done. */
	/* compatible here, decide if needs to go to file server. If
	 * we've already got the file locked (and thus read-locked, since
	 * we've already checked for compatibility), we shouldn't send
	 * the call through to the server again */
	if (avc->flockCount == 0) {
	    struct rx_connection *rxconn;
	    /* we're the first on our block, send the call through */
	    lockType = ((acom & LOCK_EX) ? LockWrite : LockRead);
	    if (!AFS_IS_DISCONNECTED) {
		tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
		XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_SETLOCK);
		code = RXAFS_SetLock(rxconn, (struct AFSFid *)
				     &avc->f.fid.Fid, lockType,
		 (tc, rxconn, code, &avc->f.fid, areq,
		  AFS_STATS_FS_RPCIDX_SETLOCK, SHARED_LOCK,
		if ((lockType == LockWrite) && (code == VREADONLY))
		    code = EBADF; /* per POSIX; VREADONLY == EROFS */
		/* XXX - Should probably try and log this when we're
		 * XXX - running with logging enabled. But it's horrid
		code = 0;	/* pretend we worked - ick!!! */
		code = 0;	/* otherwise, pretend things worked */
	/* Record the new lock locally in a SimpleLocks entry. */
	slp = (struct SimpleLocks *)
	    osi_AllocSmallSpace(sizeof(struct SimpleLocks));
	if (acom & LOCK_EX) {
	    /* Record unique id of process owning exclusive lock. */
	    avc->ownslock = getpid();
	    slp->type = LockWrite;
	    avc->flockCount = -1;
	    slp->type = LockRead;
	slp->next = avc->slocks;
	lockIdSet(&flock, slp, clid);
	/* now, if we got EWOULDBLOCK, and we're supposed to wait, we do */
	if (((code == EWOULDBLOCK) || (code == EAGAIN) ||
	     (code == UAEWOULDBLOCK) || (code == UAEAGAIN))
	    && !(acom & LOCK_NB)) {
	    /* sleep for a second, allowing interrupts */
	    ReleaseWriteLock(&avc->lock);
#if defined(AFS_SGI_ENV)
	    AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
	    code = afs_osi_Wait(1000, NULL, 1);
#if defined(AFS_SGI_ENV)
	    AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
	    ObtainWriteLock(&avc->lock, 120);
	    code = EINTR;	/* return this if ^C typed */
    ReleaseWriteLock(&avc->lock);
    code = afs_CheckCode(code, areq, 1);	/* defeat a buggy AIX optimization */
508 /* warn a user that a lock has been ignored */
/*
 * DoLockWarning -- emit (at most once per pid per ~2 minutes) a warning
 * that a byte-range lock request was accepted but not enforced.
 * acred identifies the user to warn.
 *
 * NOTE(review): interior lines (returns, #else, locking around the
 * static state) are elided in this fragment.
 */
DoLockWarning(afs_ucred_t * acred)
    /* rate-limit state: last warning time and the pid it was issued for */
    static afs_uint32 lastWarnTime;
    static pid_t lastWarnPid;
    pid_t pid = MyPidxx2Pid(MyPidxx);
    AFS_STATCNT(DoLockWarning);
    /* check if we've already warned this user recently */
    if ((now < lastWarnTime + 120) && (lastWarnPid == pid)) {
    procname = afs_osi_Alloc(256);
    /* Copies process name to allocated procname, see osi_machdeps for details of macro */
    osi_procname(procname, 256);
    procname[255] = '\0';	/* ensure NUL termination */
#ifdef AFS_LINUX26_ENV
    message = "byte-range locks only enforced for processes on this machine";
    message = "byte-range lock/unlock ignored; make sure no one else is running this program";
    afs_warnuser("afs: %s (pid %d (%s), user %ld).\n",
		 message, pid, procname, (long)afs_cr_uid(acred));
    afs_osi_Free(procname, 256);
/*
 * afs_lockctl -- entry point for fcntl-style record locking (F_GETLK /
 * F_SETLK / F_SETLKW and the SGI F_R* remote variants).  Byte-range
 * requests are downgraded to whole-file flocks (with a warning); only
 * whole-file requests are forwarded to HandleFlock/HandleGetLock.
 *
 * NOTE(review): interior lines (braces, returns, some #else/#endif) are
 * elided in this fragment.
 */
#if defined(AFS_SGI_ENV) || defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
int afs_lockctl(struct vcache * avc, struct AFS_FLOCK * af, int acmd,
		afs_ucred_t * acred, pid_t clid)
int afs_lockctl(struct vcache * avc, struct AFS_FLOCK * af, int acmd,
    struct vrequest treq;
    struct afs_fakestat_state fakestate;
    AFS_STATCNT(afs_lockctl);
    if ((code = afs_InitReq(&treq, acred)))
    afs_InitFakeStat(&fakestate);
    /* resolve a fakestat'd mountpoint to the real vcache first */
    code = afs_EvalFakeStat(&avc, &fakestate, &treq);
#if defined(AFS_SGI_ENV)
    if ((acmd == F_GETLK) || (acmd == F_RGETLK)) {
    if (acmd == F_GETLK) {
	if (af->l_type == F_UNLCK) {
	code = HandleGetLock(avc, af, &treq, clid);
	code = afs_CheckCode(code, &treq, 2);	/* defeat buggy AIX optimz */
    } else if ((acmd == F_SETLK) || (acmd == F_SETLKW)
#if defined(AFS_SGI_ENV)
	       || (acmd == F_RSETLK) || (acmd == F_RSETLKW)) {
	if ((avc->f.states & CRO)) {
	    /* for RO volumes, don't do anything for locks; the fileserver doesn't
	     * even track them. A write lock should not be possible, though. */
	    if (af->l_type == F_WRLCK) {
	/* Java VMs ask for l_len=(long)-1 regardless of OS/CPU */
	if ((sizeof(af->l_len) == 8) && (af->l_len == 0x7fffffffffffffffLL))
	/* next line makes byte range locks always succeed,
	 * even when they should block */
	if (af->l_whence != 0 || af->l_start != 0 || af->l_len != 0) {
	    DoLockWarning(acred);
	/* otherwise we can turn this into a whole-file flock */
	if (af->l_type == F_RDLCK)
	else if (af->l_type == F_WRLCK)
	else if (af->l_type == F_UNLCK)
	    code = EINVAL;	/* unknown lock type */
	if (((acmd == F_SETLK)
#if defined(AFS_SGI_ENV)
	     || (acmd == F_RSETLK)
	     ) && code != LOCK_UN)
	    code |= LOCK_NB;	/* non-blocking, s.v.p. */
#if defined(AFS_DARWIN_ENV)
	code = HandleFlock(avc, code, &treq, clid, 0 /*!onlymine */ );
#elif defined(AFS_SGI_ENV)
	AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
	code = HandleFlock(avc, code, &treq, clid, 0 /*!onlymine */ );
	AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
	code = HandleFlock(avc, code, &treq, 0, 0 /*!onlymine */ );
	code = afs_CheckCode(code, &treq, 3);	/* defeat AIX -O bug */
    afs_PutFakeStat(&fakestate);
656 * Get a description of the first lock which would
657 * block the lock specified. If the specified lock
658 * would succeed, fill in the lock structure with 'F_UNLCK'.
660 * To do that, we have to ask the server for the lock
662 * 1. The file is not locked by this machine.
663 * 2. Asking for write lock, and only the current
664 * PID has the file read locked.
/*
 * HandleGetLock -- F_GETLK implementation: describe the first lock that
 * would block the request in *af, or set af->l_type = F_UNLCK if the
 * request would succeed.
 *
 * NOTE(review): interior lines are elided in this fragment, and the
 * write-lock handling appears twice below -- in the full source the two
 * copies presumably sit under different preprocessor branches; verify
 * against the complete file before editing.
 */
HandleGetLock(struct vcache *avc, struct AFS_FLOCK *af,
	      struct vrequest *areq, int clid)
    struct AFS_FLOCK flock;
    /* Identify the caller to compare against recorded lock owners. */
    lockIdSet(&flock, NULL, clid);
    ObtainWriteLock(&avc->lock, 122);
    if (avc->flockCount == 0) {
	/*
	 * We don't know ourselves, so ask the server. Unfortunately, we
	 * don't know the pid. Not even the server knows the pid. Besides,
	 * the process with the lock is on another machine
	code = GetFlockCount(avc, areq);
	if (code == 0 || (af->l_type == F_RDLCK && code > 0)) {
	    af->l_type = F_UNLCK;
	    af->l_type = F_RDLCK;
	    af->l_type = F_WRLCK;
#if defined(AFS_HAVE_FLOCK_SYSID)
    if (af->l_type == F_RDLCK) {
	/*
	 * We want a read lock. If there are only
	 * read locks, or we are the one with the
	 * write lock, say it is unlocked.
	if (avc->flockCount > 0 ||	/* only read locks */
	    !lockIdcmp2(&flock, avc, NULL, 1, clid)) {
	    af->l_type = F_UNLCK;
	/* one write lock, but who? */
	af->l_type = F_WRLCK;	/* not us, so lock would block */
	if (avc->slocks) {	/* we know who, so tell */
	    af->l_pid = avc->slocks->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
	    af->l_sysid = avc->slocks->sysid;
	    af->l_pid = 0;	/* XXX can't happen?? */
#if defined(AFS_HAVE_FLOCK_SYSID)
	/*
	 * Ok, we want a write lock. If there is a write lock
	 * already, and it is not this process, we fail.
	if (avc->flockCount < 0) {
	    if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
		af->l_type = F_WRLCK;
		    af->l_pid = avc->slocks->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
		    af->l_sysid = avc->slocks->sysid;
		    af->l_pid = 0;	/* XXX can't happen?? */
#if defined(AFS_HAVE_FLOCK_SYSID)
	    /* we are the one with the write lock */
	    af->l_type = F_UNLCK;
	/*
	 * Want a write lock, and we know there are read locks.
	 * If there is more than one, or it isn't us, we cannot lock.
	if ((avc->flockCount > 1)
	    || lockIdcmp2(&flock, avc, NULL, 1, clid)) {
	    struct SimpleLocks *slp;
	    af->l_type = F_RDLCK;
#if defined(AFS_HAVE_FLOCK_SYSID)
	    /* find a pid that isn't our own */
	    for (slp = avc->slocks; slp; slp = slp->next) {
		if (lockIdcmp2(&flock, NULL, slp, 1, clid)) {
		    af->l_pid = slp->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
		    af->l_sysid = avc->slocks->sysid;
	/*
	 * Ok, we want a write lock. If there is a write lock
	 * already, and it is not this process, we fail.
	if (avc->flockCount < 0) {
	    if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
		af->l_type = F_WRLCK;
		    af->l_pid = avc->slocks->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
		    af->l_sysid = avc->slocks->sysid;
		    af->l_pid = 0;	/* XXX can't happen?? */
#if defined(AFS_HAVE_FLOCK_SYSID)
	    /* we are the one with the write lock */
	    af->l_type = F_UNLCK;
	/*
	 * Want a write lock, and we know there are read locks.
	 * If there is more than one, or it isn't us, we cannot lock.
	if ((avc->flockCount > 1)
	    || lockIdcmp2(&flock, avc, NULL, 1, clid)) {
	    struct SimpleLocks *slp;
	    af->l_type = F_RDLCK;
#if defined(AFS_HAVE_FLOCK_SYSID)
	    /* find a pid that isn't our own */
	    for (slp = avc->slocks; slp; slp = slp->next) {
		if (lockIdcmp2(&flock, NULL, slp, 1, clid)) {
		    af->l_pid = slp->pid;
#if defined(AFS_HAVE_FLOCK_SYSID)
		    af->l_sysid = avc->slocks->sysid;
	/*
	 * Want a write lock, and there is just one read lock, and it
	 * is this process with a read lock. Ask the server if there
	 * are any more processes with the file locked.
	code = GetFlockCount(avc, areq);
	if (code == 0 || code == 1) {
	    af->l_type = F_UNLCK;
	    af->l_type = F_RDLCK;
	    af->l_type = F_WRLCK;
#if defined(AFS_HAVE_FLOCK_SYSID)
    af->l_len = 0;		/* to end of file */
    ReleaseWriteLock(&avc->lock);
857 /* Get the 'flock' count from the server. This comes back in a 'spare'
858 * field from a GetStatus RPC. If we have any problems with the RPC,
859 * we lie and say the file is unlocked. If we ask any 'old' fileservers,
860 * the spare field will be a zero, saying the file is unlocked. This is
861 * OK, as a further 'lock' request will do the right thing.
/*
 * GetFlockCount -- fetch the server's lock count for avc via a
 * FetchStatus RPC (lockCount rides in a spare status field).  On any
 * RPC failure the file is reported as unlocked (returns 0).
 *
 * NOTE(review): interior lines (declarations of tc/code/temp, braces,
 * the retry macro) are elided in this fragment.
 */
GetFlockCount(struct vcache *avc, struct vrequest *areq)
    struct AFSFetchStatus OutStatus;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    struct rx_connection *rxconn;
    /* Force a non-blocking request; restore the caller's flag afterwards. */
    temp = areq->flags & O_NONBLOCK;
    areq->flags |= O_NONBLOCK;
    /* If we're disconnected, lie and say that we've got no locks. Ick */
    if (AFS_IS_DISCONNECTED)
    tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
	XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
	RXAFS_FetchStatus(rxconn, (struct AFSFid *)&avc->f.fid.Fid,
			  &OutStatus, &CallBack, &tsync);
     (tc, rxconn, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
    areq->flags &= ~O_NONBLOCK;
    return (0);			/* failed, say it is 'unlocked' */
    return ((int)OutStatus.lockCount);