2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
15 #include <afsconfig.h>
16 #include "afs/param.h"
19 #include "afs/sysincludes.h" /* Standard vendor system headers */
20 #include "afsincludes.h" /* Afs-based standard headers */
21 #include "afs/afs_stats.h" /* statistics */
22 #include "afs/afs_cbqueue.h"
23 #include "afs/nfsclient.h"
24 #include "afs/afs_osidnlc.h"
25 #include "afs/unified_afs.h"
31 /* Static prototypes */
/* NOTE(review): this excerpt is elided — embedded original line numbers jump
 * (e.g. 37 -> 39), so the tail of the lockIdcmp2 prototype (the final
 * "int clid);" parameter) is missing from view. Code kept byte-identical. */
32 static int HandleGetLock(struct vcache *avc,
34 struct vrequest *areq, int clid);
35 static int GetFlockCount(struct vcache *avc, struct vrequest *areq);
36 static int lockIdcmp2(struct AFS_FLOCK *flock1, struct vcache *vp,
37 struct SimpleLocks *alp, int onlymine,
39 static void DoLockWarning(afs_ucred_t * acred);
41 /* int clid; * non-zero on SGI, OSF, SunOS, Darwin, xBSD ** XXX ptr type */
/* lockIdSet: stamp the owner identity (pid, and sysid where the platform has
 * one) into either the SimpleLocks entry (slp != NULL) or the AFS_FLOCK
 * (slp == NULL).  One variant per platform via the #if/#elif chain below.
 * NOTE(review): excerpt is elided (embedded line numbers jump), so the
 * "if (slp) ... else ..." scaffolding, braces, and #else/#endif lines of each
 * variant are missing from view.  Code kept byte-identical. */
43 #if defined(AFS_SUN5_ENV)
/* Solaris: owner identity comes from the current thread's proc. */
45 lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
47 proc_t *procp = ttoproc(curthread);
51 slp->pid = procp->p_pid;
54 flock->l_pid = procp->p_pid;
57 #elif defined(AFS_SGI_ENV)
/* IRIX: 6.5 uses the flid (fl_pid/fl_sysid); older IRIX queries the
 * current proc / OSI_GET_CURRENT_SYSID() directly. */
59 lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
61 # if defined(AFS_SGI65_ENV)
63 get_current_flid(&flid);
65 afs_proc_t *procp = OSI_GET_CURRENT_PROCP();
70 slp->sysid = flid.fl_sysid;
72 slp->sysid = OSI_GET_CURRENT_SYSID();
77 flock->l_sysid = flid.fl_sysid;
79 flock->l_sysid = OSI_GET_CURRENT_SYSID();
84 #elif defined(AFS_AIX_ENV)
/* AIX: three generations — 4.1 (getpid), 3.2 (u.u_sysid/u_epid), and a
 * legacy path reading procp. */
86 lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
88 # if !defined(AFS_AIX32_ENV)
89 afs_proc_t *procp = u.u_procp;
93 # if defined(AFS_AIX41_ENV)
96 # elif defined(AFS_AIX32_ENV)
97 slp->sysid = u.u_sysid;
100 slp->sysid = procp->p_sysid;
/* NOTE(review): "prcop" is undeclared — presumably a typo for "procp"
 * (declared above).  This pre-AIX32 branch can never have compiled;
 * confirm before relying on it. */
101 slp->pid = prcop->p_epid;
104 # if defined(AFS_AIX41_ENV)
106 flock->l_pid = getpid();
107 # elif defined(AFS_AIX32_ENV)
108 flock->l_sysid = u.u_sysid;
109 flock->l_pid = u.u_epid;
111 flock->l_sysid = procp->p_sysid;
112 flock->l_pid = procp->p_epid;
116 #elif defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
/* Darwin / *BSD: body elided in this excerpt (lines 119-125 missing). */
118 lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
126 #elif defined(AFS_LINUX20_ENV) || defined(AFS_HPUX_ENV)
/* Linux / HP-UX: pid only, via getpid(). */
128 lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
133 flock->l_pid = getpid();
136 #elif defined(UKERNEL)
/* User-space (libuafs) kernel: pid from the emulated user struct. */
138 lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
141 slp->pid = get_user_struct()->u_procp->p_pid;
143 flock->l_pid = get_user_struct()->u_procp->p_pid;
/* Default (remaining platforms): pid from u.u_procp. */
148 lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
151 slp->pid = u.u_procp->p_pid;
153 flock->l_pid = u.u_procp->p_pid;
158 /* return 1 (true) if specified flock does not match alp (if
159 * specified), or any of the slp structs (if alp == 0)
161 /* I'm not sure that the comparsion of flock->pid to p_ppid
162 * is correct. Should that be a comparision of alp (or slp) ->pid
163 * to p_ppid? Especially in the context of the lower loop, where
164 * the repeated comparison doesn't make much sense...
166 /* onlymine - don't match any locks which are held by my parent */
167 /* clid - only irix 6.5 */
/* NOTE(review): excerpt is elided (embedded line numbers jump), so the
 * function's braces, returns, and parts of the condition chains are missing
 * from view.  Code kept byte-identical. */
170 lockIdcmp2(struct AFS_FLOCK *flock1, struct vcache *vp,
171 struct SimpleLocks *alp, int onlymine, int clid)
173 struct SimpleLocks *slp;
/* Per-platform lookup of the current proc, mirroring lockIdSet above. */
174 #if defined(AFS_SUN5_ENV)
175 proc_t *procp = ttoproc(curthread);
177 #if !defined(AFS_AIX41_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_SGI65_ENV) && !defined(AFS_DARWIN_ENV) && !defined(AFS_XBSD_ENV)
179 afs_proc_t *procp = curprocp;
180 #elif defined(UKERNEL)
181 afs_proc_t *procp = get_user_struct()->u_procp;
183 afs_proc_t *procp = u.u_procp;
184 #endif /* AFS_SGI64_ENV */
/* Single-entry comparison against alp: sysid first (where the platform
 * tracks one), then pid — optionally also matching the parent process
 * unless onlymine is set. */
189 #if defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
190 if (flock1->l_sysid != alp->sysid) {
194 if ((flock1->l_pid == alp->pid) ||
195 #if defined(AFS_AIX41_ENV) || defined(AFS_LINUX20_ENV) || defined(AFS_HPUX_ENV)
196 (!onlymine && (flock1->l_pid == getppid()))
198 #if defined(AFS_SGI65_ENV) || defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
199 /* XXX check this. used to be *only* irix for some reason. */
200 (!onlymine && (flock1->l_pid == clid))
202 (!onlymine && (flock1->l_pid == procp->p_ppid))
/* alp == NULL: scan every SimpleLocks entry on the vcache instead. */
211 for (slp = vp->slocks; slp; slp = slp->next) {
212 #if defined(AFS_HAVE_FLOCK_SYSID)
213 if (flock1->l_sysid != slp->sysid) {
217 if (flock1->l_pid == slp->pid) {
221 return (1); /* failure */
225 /* we don't send multiple read flocks to the server, but rather just count
226 them up ourselves. Of course, multiple write locks are incompatible.
228 Note that we should always try to release a lock, even if we have
229 a network problem sending the release command through, since often
230 a lock is released on a close call, when the user can't retry anyway.
232 After we remove it from our structure, the lock will no longer be
233 kept alive, and the server should time it out within a few minutes.
235 94.04.13 add "force" parameter. If a child explicitly unlocks a
236 file, I guess we'll permit it. however, we don't want simple,
237 innocent closes by children to unlock files in the parent process.
239 If called when disconnected support is unabled, the discon_lock must
242 /* clid - nonzero on sgi sunos osf1 only */
/* HandleFlock: apply a whole-file flock operation (acom = LOCK_SH / LOCK_EX /
 * LOCK_UN, optionally LOCK_NB) to vcache avc under avc->lock (write).
 * Read locks are only counted locally (avc->flockCount > 0); a single
 * exclusive lock is flockCount == -1.  The fileserver is contacted only on
 * the 0 <-> nonzero transitions (RXAFS_SetLock / RXAFS_ReleaseLock).
 * NOTE(review): excerpt is elided (embedded line numbers jump), so braces,
 * else-arms, gotos and several statements are missing from view.  Code kept
 * byte-identical. */
244 HandleFlock(struct vcache *avc, int acom, struct vrequest *areq,
245 pid_t clid, int onlymine)
248 struct SimpleLocks *slp, *tlp, **slpp;
250 struct AFSVolSync tsync;
252 struct AFS_FLOCK flock;
254 AFS_STATCNT(HandleFlock);
255 code = 0; /* default when we don't make any network calls */
256 lockIdSet(&flock, NULL, clid);
258 #if defined(AFS_SGI_ENV)
259 osi_Assert(valusema(&avc->vc_rwlock) <= 0);
260 osi_Assert(OSI_GET_LOCKID() == avc->vc_rwlockid);
262 ObtainWriteLock(&avc->lock, 118);
263 if (acom & LOCK_UN) {
264 int stored_segments = 0;
270 /* If the lock is held exclusive, then only the owning process
271 * or a child can unlock it. Use pid and ppid because they are
272 * unique identifiers.
274 if ((avc->flockCount < 0) && (getpid() != avc->ownslock)) {
276 if (onlymine || (getppid() != avc->ownslock)) {
278 if (onlymine || (u.u_procp->p_ppid != avc->ownslock)) {
280 ReleaseWriteLock(&avc->lock);
/* Unlock by a non-owner (per lockIdcmp2) is refused. */
285 if (lockIdcmp2(&flock, avc, NULL, onlymine, clid)) {
286 ReleaseWriteLock(&avc->lock);
/* Nothing held locally: nothing to do. */
292 if (avc->flockCount == 0) {
293 ReleaseWriteLock(&avc->lock);
297 /* unlock the lock */
298 if (avc->flockCount > 0) {
/* Shared locks: unlink and free every matching SimpleLocks entry. */
300 for (slp = *slpp; slp;) {
301 if (!lockIdcmp2(&flock, avc, slp, onlymine, clid)) {
303 tlp = *slpp = slp->next;
304 osi_FreeSmallSpace(slp);
311 } else if (avc->flockCount == -1) {
312 if (!stored_segments) {
313 afs_StoreAllSegments(avc, areq, AFS_SYNC | AFS_VMSYNC); /* fsync file early */
314 /* afs_StoreAllSegments can drop and reacquire the write lock
315 * on avc and GLOCK, so the flocks may be completely different
316 * now. Go back and perform all checks again. */
321 /* And remove the (only) exclusive lock entry from the list... */
322 osi_FreeSmallSpace(avc->slocks);
/* Last local lock gone: tell the fileserver (unless disconnected). */
325 if (avc->flockCount == 0) {
326 if (!AFS_IS_DISCONNECTED) {
327 struct rx_connection *rxconn;
329 tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
331 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_RELEASELOCK);
333 code = RXAFS_ReleaseLock(rxconn, (struct AFSFid *)
334 &avc->f.fid.Fid, &tsync);
340 (tc, rxconn, code, &avc->f.fid, areq,
341 AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK, NULL));
343 /*printf("Network is dooooooowwwwwwwnnnnnnn\n");*/
348 while (1) { /* set a new lock */
350 * Upgrading from shared locks to Exclusive and vice versa
351 * is a bit tricky and we don't really support it yet. But
352 * we try to support the common used one which is upgrade
353 * a shared lock to an exclusive for the same process...
355 if ((avc->flockCount > 0 && (acom & LOCK_EX))
356 || (avc->flockCount == -1 && (acom & LOCK_SH))) {
358 * Upgrading from shared locks to an exclusive one:
359 * For now if all the shared locks belong to the
360 * same process then we unlock them on the server
361 * and proceed with the upgrade. Unless we change the
362 * server's locking interface impl we prohibit from
363 * unlocking other processes's shared locks...
364 * Upgrading from an exclusive lock to a shared one:
365 * Again only allowed to be done by the same process.
/* Drop our own existing entries before requesting the new mode. */
368 for (slp = *slpp; slp;) {
370 (&flock, avc, slp, 1 /*!onlymine */ , clid)) {
375 tlp = *slpp = slp->next;
376 osi_FreeSmallSpace(slp);
384 if (!code && avc->flockCount == 0) {
385 if (!AFS_IS_DISCONNECTED) {
386 struct rx_connection *rxconn;
388 tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
391 (AFS_STATS_FS_RPCIDX_RELEASELOCK);
394 RXAFS_ReleaseLock(rxconn,
395 (struct AFSFid *)&avc->
402 (tc, rxconn, code, &avc->f.fid, areq,
403 AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK,
407 } else if (avc->flockCount == -1 && (acom & LOCK_EX)) {
408 if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
412 /* We've just re-grabbed an exclusive lock, so we don't
413 * need to contact the fileserver, and we don't need to
414 * add the lock to avc->slocks (since we already have a
415 * lock there). So, we are done. */
420 /* compatible here, decide if needs to go to file server. If
421 * we've already got the file locked (and thus read-locked, since
422 * we've already checked for compatibility), we shouldn't send
423 * the call through to the server again */
424 if (avc->flockCount == 0) {
425 struct rx_connection *rxconn;
426 /* we're the first on our block, send the call through */
427 lockType = ((acom & LOCK_EX) ? LockWrite : LockRead);
428 if (!AFS_IS_DISCONNECTED) {
430 tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
432 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_SETLOCK);
434 code = RXAFS_SetLock(rxconn, (struct AFSFid *)
435 &avc->f.fid.Fid, lockType,
442 (tc, rxconn, code, &avc->f.fid, areq,
443 AFS_STATS_FS_RPCIDX_SETLOCK, SHARED_LOCK,
445 if ((lockType == LockWrite) && (code == VREADONLY))
446 code = EBADF; /* per POSIX; VREADONLY == EROFS */
448 /* XXX - Should probably try and log this when we're
449 * XXX - running with logging enabled. But it's horrid
451 code = 0; /* pretend we worked - ick!!! */
453 code = 0; /* otherwise, pretend things worked */
/* Record the new lock locally: one SimpleLocks entry per holder. */
456 slp = (struct SimpleLocks *)
457 osi_AllocSmallSpace(sizeof(struct SimpleLocks));
458 if (acom & LOCK_EX) {
463 /* Record unique id of process owning exclusive lock. */
464 avc->ownslock = getpid();
467 slp->type = LockWrite;
470 avc->flockCount = -1;
472 slp->type = LockRead;
473 slp->next = avc->slocks;
478 lockIdSet(&flock, slp, clid);
481 /* now, if we got EWOULDBLOCK, and we're supposed to wait, we do */
482 if (((code == EWOULDBLOCK) || (code == EAGAIN) ||
483 (code == UAEWOULDBLOCK) || (code == UAEAGAIN))
484 && !(acom & LOCK_NB)) {
485 /* sleep for a second, allowing interrupts */
486 ReleaseWriteLock(&avc->lock);
487 #if defined(AFS_SGI_ENV)
488 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
490 code = afs_osi_Wait(1000, NULL, 1);
491 #if defined(AFS_SGI_ENV)
492 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
494 ObtainWriteLock(&avc->lock, 120);
496 code = EINTR; /* return this if ^C typed */
503 ReleaseWriteLock(&avc->lock);
504 code = afs_CheckCode(code, areq, 1); /* defeat a buggy AIX optimization */
509 /* warn a user that a lock has been ignored */
/* Rate-limit state: at most one warning per (pid) per 120 seconds.
 * NOTE(review): these are written without any visible locking — presumably a
 * benign race for a log-throttle; the update sites are elided from this
 * excerpt, so confirm against the full file. */
510 afs_int32 lastWarnTime = 0; /* this is used elsewhere */
511 static afs_int32 lastWarnPid = 0;
/* DoLockWarning: log (via afs_warnuser, elided here) that a byte-range lock
 * request was only honored locally.  Excerpt is elided (embedded line numbers
 * jump); code kept byte-identical. */
513 DoLockWarning(afs_ucred_t * acred)
516 pid_t pid = MyPidxx2Pid(MyPidxx);
521 AFS_STATCNT(DoLockWarning);
522 /* check if we've already warned this user recently */
523 if (!((now < lastWarnTime + 120) && (lastWarnPid == pid))) {
524 procname = afs_osi_Alloc(256);
529 /* Copies process name to allocated procname, see osi_machdeps for details of macro */
530 osi_procname(procname, 256);
531 procname[255] = '\0';
533 /* otherwise, it is time to nag the user */
536 #ifdef AFS_LINUX26_ENV
538 ("afs: byte-range locks only enforced for processes on this machine (pid %d (%s), user %ld).\n", pid, procname, (long)afs_cr_uid(acred));
541 ("afs: byte-range lock/unlock ignored; make sure no one else is running this program (pid %d (%s), user %ld).\n", pid, procname, afs_cr_uid(acred));
543 afs_osi_Free(procname, 256);
/* afs_lockctl: entry point for fcntl-style record locks (F_GETLK/F_SETLK/
 * F_SETLKW, plus the IRIX F_R* remote variants).  Only whole-file locks are
 * really supported: byte-range requests trigger DoLockWarning and are then
 * treated as (or mapped to) whole-file flocks handled by HandleFlock /
 * HandleGetLock.  NOTE(review): excerpt is elided (embedded line numbers
 * jump), so braces, gotos and some assignments (e.g. the LOCK_SH/LOCK_EX/
 * LOCK_UN mappings after the F_RDLCK/F_WRLCK tests) are missing from view.
 * Code kept byte-identical. */
549 #if defined(AFS_SGI_ENV) || defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
550 int afs_lockctl(struct vcache * avc, struct AFS_FLOCK * af, int acmd,
551 afs_ucred_t * acred, pid_t clid)
554 int afs_lockctl(struct vcache * avc, struct AFS_FLOCK * af, int acmd,
558 struct vrequest treq;
560 struct afs_fakestat_state fakestate;
562 AFS_STATCNT(afs_lockctl);
563 if ((code = afs_InitReq(&treq, acred)))
565 afs_InitFakeStat(&fakestate);
569 code = afs_EvalFakeStat(&avc, &fakestate, &treq);
573 #if defined(AFS_SGI_ENV)
574 if ((acmd == F_GETLK) || (acmd == F_RGETLK)) {
576 if (acmd == F_GETLK) {
578 if (af->l_type == F_UNLCK) {
582 code = HandleGetLock(avc, af, &treq, clid);
583 code = afs_CheckCode(code, &treq, 2); /* defeat buggy AIX optimz */
585 } else if ((acmd == F_SETLK) || (acmd == F_SETLKW)
586 #if defined(AFS_SGI_ENV)
587 || (acmd == F_RSETLK) || (acmd == F_RSETLKW)) {
591 /* Java VMs ask for l_len=(long)-1 regardless of OS/CPU */
592 if ((sizeof(af->l_len) == 8) && (af->l_len == 0x7fffffffffffffffLL))
594 /* next line makes byte range locks always succeed,
595 * even when they should block */
596 if (af->l_whence != 0 || af->l_start != 0 || af->l_len != 0) {
597 DoLockWarning(acred);
601 /* otherwise we can turn this into a whole-file flock */
602 if (af->l_type == F_RDLCK)
604 else if (af->l_type == F_WRLCK)
606 else if (af->l_type == F_UNLCK)
609 code = EINVAL; /* unknown lock type */
/* F_SETLK (vs F_SETLKW) is non-blocking: OR in LOCK_NB for HandleFlock. */
612 if (((acmd == F_SETLK)
613 #if defined(AFS_SGI_ENV)
614 || (acmd == F_RSETLK)
616 ) && code != LOCK_UN)
617 code |= LOCK_NB; /* non-blocking, s.v.p. */
618 #if defined(AFS_DARWIN_ENV)
619 code = HandleFlock(avc, code, &treq, clid, 0 /*!onlymine */ );
620 #elif defined(AFS_SGI_ENV)
621 AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
622 code = HandleFlock(avc, code, &treq, clid, 0 /*!onlymine */ );
623 AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
625 code = HandleFlock(avc, code, &treq, 0, 0 /*!onlymine */ );
627 code = afs_CheckCode(code, &treq, 3); /* defeat AIX -O bug */
632 afs_PutFakeStat(&fakestate);
639 * Get a description of the first lock which would
640 * block the lock specified. If the specified lock
641 * would succeed, fill in the lock structure with 'F_UNLCK'.
643 * To do that, we have to ask the server for the lock
645 * 1. The file is not locked by this machine.
646 * 2. Asking for write lock, and only the current
647 * PID has the file read locked.
/* HandleGetLock: implement F_GETLK against the local SimpleLocks list,
 * falling back to GetFlockCount (a server round-trip) in the two cases
 * named above.  Runs under avc->lock (write).  NOTE(review): excerpt is
 * elided (embedded line numbers jump) — braces, else-arms and several
 * statements are missing from view, and the two "want a write lock"
 * passages appear twice because the file's surrounding control flow is cut.
 * Code kept byte-identical. */
650 HandleGetLock(struct vcache *avc, struct AFS_FLOCK *af,
651 struct vrequest *areq, int clid)
654 struct AFS_FLOCK flock;
656 lockIdSet(&flock, NULL, clid);
658 ObtainWriteLock(&avc->lock, 122);
659 if (avc->flockCount == 0) {
661 * We don't know ourselves, so ask the server. Unfortunately, we
662 * don't know the pid. Not even the server knows the pid. Besides,
663 * the process with the lock is on another machine
665 code = GetFlockCount(avc, areq);
666 if (code == 0 || (af->l_type == F_RDLCK && code > 0)) {
667 af->l_type = F_UNLCK;
671 af->l_type = F_RDLCK;
673 af->l_type = F_WRLCK;
676 #if defined(AFS_HAVE_FLOCK_SYSID)
682 if (af->l_type == F_RDLCK) {
684 * We want a read lock. If there are only
685 * read locks, or we are the one with the
686 * write lock, say it is unlocked.
688 if (avc->flockCount > 0 || /* only read locks */
689 !lockIdcmp2(&flock, avc, NULL, 1, clid)) {
690 af->l_type = F_UNLCK;
694 /* one write lock, but who? */
695 af->l_type = F_WRLCK; /* not us, so lock would block */
696 if (avc->slocks) { /* we know who, so tell */
697 af->l_pid = avc->slocks->pid;
698 #if defined(AFS_HAVE_FLOCK_SYSID)
699 af->l_sysid = avc->slocks->sysid;
702 af->l_pid = 0; /* XXX can't happen?? */
703 #if defined(AFS_HAVE_FLOCK_SYSID)
711 * Ok, we want a write lock. If there is a write lock
712 * already, and it is not this process, we fail.
714 if (avc->flockCount < 0) {
715 if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
716 af->l_type = F_WRLCK;
718 af->l_pid = avc->slocks->pid;
719 #if defined(AFS_HAVE_FLOCK_SYSID)
720 af->l_sysid = avc->slocks->sysid;
723 af->l_pid = 0; /* XXX can't happen?? */
724 #if defined(AFS_HAVE_FLOCK_SYSID)
730 /* we are the one with the write lock */
731 af->l_type = F_UNLCK;
736 * Want a write lock, and we know there are read locks.
737 * If there is more than one, or it isn't us, we cannot lock.
739 if ((avc->flockCount > 1)
740 || lockIdcmp2(&flock, avc, NULL, 1, clid)) {
741 struct SimpleLocks *slp;
743 af->l_type = F_RDLCK;
745 #if defined(AFS_HAVE_FLOCK_SYSID)
748 /* find a pid that isn't our own */
749 for (slp = avc->slocks; slp; slp = slp->next) {
750 if (lockIdcmp2(&flock, NULL, slp, 1, clid)) {
751 af->l_pid = slp->pid;
752 #if defined(AFS_HAVE_FLOCK_SYSID)
753 af->l_sysid = avc->slocks->sysid;
762 * Ok, we want a write lock. If there is a write lock
763 * already, and it is not this process, we fail.
765 if (avc->flockCount < 0) {
766 if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
767 af->l_type = F_WRLCK;
769 af->l_pid = avc->slocks->pid;
770 #if defined(AFS_HAVE_FLOCK_SYSID)
771 af->l_sysid = avc->slocks->sysid;
774 af->l_pid = 0; /* XXX can't happen?? */
775 #if defined(AFS_HAVE_FLOCK_SYSID)
781 /* we are the one with the write lock */
782 af->l_type = F_UNLCK;
787 * Want a write lock, and we know there are read locks.
788 * If there is more than one, or it isn't us, we cannot lock.
790 if ((avc->flockCount > 1)
791 || lockIdcmp2(&flock, avc, NULL, 1, clid)) {
792 struct SimpleLocks *slp;
793 af->l_type = F_RDLCK;
795 #if defined(AFS_HAVE_FLOCK_SYSID)
798 /* find a pid that isn't our own */
799 for (slp = avc->slocks; slp; slp = slp->next) {
800 if (lockIdcmp2(&flock, NULL, slp, 1, clid)) {
801 af->l_pid = slp->pid;
802 #if defined(AFS_HAVE_FLOCK_SYSID)
803 af->l_sysid = avc->slocks->sysid;
812 * Want a write lock, and there is just one read lock, and it
813 * is this process with a read lock. Ask the server if there
814 * are any more processes with the file locked.
816 code = GetFlockCount(avc, areq);
817 if (code == 0 || code == 1) {
818 af->l_type = F_UNLCK;
822 af->l_type = F_RDLCK;
824 af->l_type = F_WRLCK;
826 #if defined(AFS_HAVE_FLOCK_SYSID)
833 af->l_len = 0; /* to end of file */
836 ReleaseWriteLock(&avc->lock);
840 /* Get the 'flock' count from the server. This comes back in a 'spare'
841 * field from a GetStatus RPC. If we have any problems with the RPC,
842 * we lie and say the file is unlocked. If we ask any 'old' fileservers,
843 * the spare field will be a zero, saying the file is unlocked. This is
844 * OK, as a further 'lock' request will do the right thing.
/* NOTE(review): excerpt is elided (embedded line numbers jump), so braces,
 * the retry/goto scaffolding around afs_Conn, and the restore of the saved
 * O_NONBLOCK bit ('temp') are missing from view.  Code kept byte-identical. */
847 GetFlockCount(struct vcache *avc, struct vrequest *areq)
851 struct AFSFetchStatus OutStatus;
852 struct AFSCallBack CallBack;
853 struct AFSVolSync tsync;
854 struct rx_connection *rxconn;
/* Force the request non-blocking for the duration of the RPC; the previous
 * O_NONBLOCK state is saved in 'temp' (restore site elided from view). */
857 temp = areq->flags & O_NONBLOCK;
858 areq->flags |= O_NONBLOCK;
860 /* If we're disconnected, lie and say that we've got no locks. Ick */
861 if (AFS_IS_DISCONNECTED)
865 tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
867 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
870 RXAFS_FetchStatus(rxconn, (struct AFSFid *)&avc->f.fid.Fid,
871 &OutStatus, &CallBack, &tsync);
877 (tc, rxconn, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
881 areq->flags &= ~O_NONBLOCK;
884 return (0); /* failed, say it is 'unlocked' */
886 return ((int)OutStatus.lockCount);