code = cm_SyncOp(scp, NULL, userp, reqp, rights,
CM_SCACHESYNC_GETSTATUS
| CM_SCACHESYNC_NEEDCALLBACK);
+
+ if (code == 0 &&
+ ((rights & PRSFS_WRITE) || (rights & PRSFS_READ)) &&
+ scp->fileType == CM_SCACHETYPE_FILE) {
+
+ cm_key_t key;
+ unsigned int sLockType;
+ LARGE_INTEGER LOffset, LLength;
+
+ /* Check if there's some sort of lock on the file at the
+ moment, by trying to obtain (and immediately release) a
+ probe lock of our own. */
+
+ key = cm_GenerateKey(CM_SESSION_CMINT,0,0);
+
+ if (rights & PRSFS_WRITE)
+ sLockType = 0;
+ else
+ sLockType = LOCKING_ANDX_SHARED_LOCK;
+
+ LOffset.HighPart = 1;
+ LOffset.LowPart = 0;
+ LLength.HighPart = 0;
+ LLength.LowPart = 1;
+
+ code = cm_Lock(scp, sLockType, LOffset, LLength, key, 0, userp, reqp, NULL);
+
+ if (code == 0) {
+ cm_Unlock(scp, sLockType, LOffset, LLength, key, userp, reqp);
+ } else {
+ /* In this case, we allow the file open to go through even
+ though we can't enforce mandatory locking on the
+ file. */
+ if (code == CM_ERROR_NOACCESS &&
+ !(rights & PRSFS_WRITE))
+ code = 0;
+ else
+ code = CM_ERROR_SHARING_VIOLATION;
+ }
+ }
+
lock_ReleaseMutex(&scp->mx);
return code;
code = cm_SyncOp(scp, NULL, userp, reqp, rights,
CM_SCACHESYNC_GETSTATUS
| CM_SCACHESYNC_NEEDCALLBACK);
- lock_ReleaseMutex(&scp->mx);
/*
 * If the open will fail because the volume is readonly, then we will
 * return an access denied error instead.
 */
if (code == CM_ERROR_READONLY)
code = CM_ERROR_NOACCESS;
+ else if (code == 0 &&
+ ((rights & PRSFS_WRITE) || (rights & PRSFS_READ)) &&
+ scp->fileType == CM_SCACHETYPE_FILE) {
+ cm_key_t key;
+ unsigned int sLockType;
+ LARGE_INTEGER LOffset, LLength;
+
+ /* Check if there's some sort of lock on the file at the
+ moment, by trying to obtain (and immediately release) a
+ probe lock of our own. */
+
+ key = cm_GenerateKey(CM_SESSION_CMINT,0,0);
+ if (rights & PRSFS_WRITE)
+ sLockType = 0;
+ else
+ sLockType = LOCKING_ANDX_SHARED_LOCK;
+ LOffset.HighPart = 1;
+ LOffset.LowPart = 0;
+ LLength.HighPart = 0;
+ LLength.LowPart = 1;
+
+ code = cm_Lock(scp, sLockType, LOffset, LLength, key, 0, userp, reqp, NULL);
+
+ if (code == 0) {
+ cm_Unlock(scp, sLockType, LOffset, LLength, key, userp, reqp);
+ } else {
+ /* In this case, we allow the file open to go through even
+ though we can't enforce mandatory locking on the
+ file. */
+ if (code == CM_ERROR_NOACCESS &&
+ !(rights & PRSFS_WRITE))
+ code = 0;
+ else
+ code = CM_ERROR_SHARING_VIOLATION;
+ }
+ }
+
+ lock_ReleaseMutex(&scp->mx);
return code;
}
match = cm_stricmp(matchName, sp->searchNamep);
else
match = strcmp(matchName, sp->searchNamep);
- }
+ }
if (match != 0)
return 0;
return code;
}
-long cm_Lock(cm_scache_t *scp, unsigned char LockType,
- LARGE_INTEGER LOffset, LARGE_INTEGER LLength,
- u_long Timeout, cm_user_t *userp, cm_req_t *reqp,
- void **lockpp)
+/* Byte range locks:
+
+ The OpenAFS Windows client has to fake byte range locks given no
+ server side support for such locks. This is implemented as keyed
+ byte range locks on the cache manager.
+
+ Keyed byte range locks:
+
+ Each cm_scache_t structure keeps track of a list of keyed locks.
+ The key for a lock is essentially a token which identifies an owner
+ of a set of locks (referred to as a client). The set of keys used
+ within a specific cm_scache_t structure forms a namespace that has a
+ scope of just that cm_scache_t structure. The same key value can
+ be used with another cm_scache_t structure and correspond to a
+ completely different client. However, it is advantageous for the
+ SMB or IFS layer to make sure that there is a 1-1 mapping between
+ clients and keys irrespective of the cm_scache_t.
+
+ Assume a client C has key Key(C) (although, since the scope of the
+ key is a cm_scache_t, the key can be Key(C,S), where S is the
+ cm_scache_t. But assume a 1-1 relation between keys and clients).
+ A byte range (O,+L) denotes byte addresses (O) through (O+L-1)
+ inclusive (a.k.a. [O,O+L-1]). The function Key(x) is implemented
+ by the cm_GenerateKey() function for both SMB and IFS.
+
+ The cache manager will set a lock on the AFS file server in order
+ to assert the locks in S->fileLocks. If only shared locks are in
+ place for S, then the cache manager will obtain a LockRead lock,
+ while if there are any exclusive locks, it will obtain a LockWrite
+ lock. If the exclusive locks are all released while the shared
+ locks remain, then the cache manager will downgrade the lock from
+ LockWrite to LockRead.
+
+ Lock states:
+
+ A lock exists iff it is in S->fileLocks for some cm_scache_t
+ S. Existing locks are in one of the following states: ACTIVE,
+ WAITLOCK, WAITUNLOCK, LOST, DELETED.
+
+ The following sections describe each lock and the associated
+ transitions.
+
+ 1. ACTIVE: A lock L is ACTIVE iff the cache manager has asserted
+ the lock with the AFS file server. This type of lock can be
+ exercised by a client to read or write to the locked region (as
+ the lock allows).
+
+ 1.1 ACTIVE->LOST: When the AFS file server fails to extend a
+ server lock that was required to assert the lock.
+
+ 1.2 ACTIVE->DELETED: Lock is released.
+
+ 2. WAITLOCK: A lock is in a WAITLOCK state if the cache manager
+ grants the lock but the lock is yet to be asserted with the AFS
+ file server. Once the file server grants the lock, the state
+ will transition to an ACTIVE lock.
+
+ 2.1 WAITLOCK->ACTIVE: The server granted the lock.
+
+ 2.2 WAITLOCK->DELETED: Lock is abandoned, or timed out during
+ waiting.
+
+ 2.3 WAITLOCK->LOST: One or more locks from this client were
+ marked as LOST. No further locks will be granted to this
+ client until all lost locks are removed.
+
+ 3. WAITUNLOCK: A lock is in a WAITUNLOCK state if the cache manager
+ receives a request for a lock that conflicts with an existing
+ ACTIVE or WAITLOCK lock. The lock will be placed in the queue
+ and will be granted when the conflicting locks are
+ removed, at which point the state will transition to either
+ WAITLOCK or ACTIVE.
+
+ 3.1 WAITUNLOCK->ACTIVE: The conflicting lock was removed. The
+ current serverLock is sufficient to assert this lock, or a
+ sufficient serverLock is obtained.
+
+ 3.2 WAITUNLOCK->WAITLOCK: The conflicting lock was removed,
+ however the required serverLock is yet to be asserted with the
+ server.
+
+ 3.3 WAITUNLOCK->DELETED: The lock is abandoned or timed out.
+
+ 3.5 WAITUNLOCK->LOST: One or more locks from this client were
+ marked as LOST. No further locks will be granted to this
+ client until all lost locks are removed.
+
+ 4. LOST: A lock L is LOST if the server lock that was required to
+ assert the lock could not be obtained or if it could not be
+ extended, or if other locks by the same client were LOST.
+ Effectively, once a lock is LOST, the contract between the cache
+ manager and that specific client is no longer valid.
+
+ The cache manager rechecks the server lock once every minute and
+ extends it as appropriate. If this is not done for 5 minutes,
+ the AFS file server will release the lock. Once released, the
+ lock cannot be re-obtained without verifying that the contents
+ of the file haven't been modified since the time the lock was
+ released. Re-obtaining the lock without that verification may
+ cause data corruption.
+
+ 4.1 LOST->DELETED: The lock is released.
+
+ 4.2 LOST->ACTIVE: The lock is reasserted. This requires
+ verifying that the file was not modified in between.
+
+ 4.3 LOST->WAITLOCK: All LOST ACTIVE locks from this client were
+ reasserted. The cache manager can reinstate this waiting
+ lock.
+
+ 4.4 LOST->WAITUNLOCK: All LOST ACTIVE locks from this client
+ were reasserted. The cache manager can reinstate this waiting
+ lock.
+
+ 5. DELETED: The lock is no longer relevant. Eventually, it will
+ get removed from the cm_scache_t. In the meantime, it will be
+ treated as if it does not exist.
+
+ 5.1 DELETED->not exist: The lock is removed from the
+ cm_scache_t.
+
+ 6* A lock L is ACCEPTED if it is ACTIVE or WAITLOCK.
+ These locks have been accepted by the cache manager, but may or
+ may not have been granted back to the client.
+
+ 7* A lock L is QUEUED if it is ACTIVE, WAITLOCK or WAITUNLOCK.
+
+ 8* A lock L is EFFECTIVE if it is ACTIVE or LOST.
+
+ 9* A lock L is WAITING if it is WAITLOCK or WAITUNLOCK.
+
+ Lock operation:
+
+ A client C can READ range (Offset,+Length) of cm_scache_t S iff:
+
+ 1. for all _a_ in (Offset,+Length), one of the following is true:
+
+ 1.1 There does NOT exist an ACTIVE lock L in S->fileLocks such
+ that _a_ in (L->LOffset,+L->LLength) (IOW: byte _a_ of S is
+ unowned)
+
+ AND
+
+ For each LOST lock M in S->fileLocks such that
+ _a_ in (M->LOffset,+M->LLength), M->LockType is shared AND
+ M->key != Key(C).
+
+ (Note: If this is a different client from the one whose shared
+ lock was LOST, then the contract between this client and the
+ cache manager is indistinguishable from that where no lock
+ was lost. If an exclusive lock was lost, then the range is
+ considered unsafe for consumption.)
+
+ 1.3 There is an ACTIVE lock L in S->fileLocks such that: L->key
+ == Key(C) && _a_ in (L->LOffset,+L->LLength) (IOW: byte _a_
+ of S is owned by C under lock L)
+
+ 1.4 There is an ACTIVE lock L in S->fileLocks such that _a_ in
+ (L->LOffset,+L->LLength) && L->LockType is shared (IOW: byte
+ _a_ of S is shared) AND there is no LOST lock M such that _a_
+ in (M->LOffset,+M->LLength) and M->key == Key(C)
+
+ A client C can WRITE range (Offset,+Length) of cm_scache_t S iff:
+
+ 2. for all _a_ in (Offset,+Length), one of the following is true:
+
+ 2.1 Byte _a_ of S is unowned (as above) AND for each LOST lock
+ L in S->fileLocks _a_ NOT in (L->LOffset,+L->LLength).
+
+ 2.2 Byte _a_ of S is owned by C under lock L (as above) AND
+ L->LockType is exclusive.
+
+ A client C can OBTAIN a lock L on cm_scache_t S iff:
+
+ 3. for all _a_ in (L->LOffset,+L->LLength), ALL of the following
+ are true:
+
+ 3.1 L->LockType is exclusive IMPLIES there does NOT exist a QUEUED lock
+ M in S->fileLocks such that _a_ in (M->LOffset,+M->LLength).
+
+ (Note: If we count all QUEUED locks then we hit cases such as
+ cascading waiting locks where the locks later on in the queue
+ can be granted without compromising file integrity. On the
+ other hand if only ACCEPTED locks are considered, then locks
+ that were received earlier may end up waiting for locks that
+ were received later to be unlocked. The choice of QUEUED
+ locks was made so that large locks don't consistently get
+ trumped by smaller locks which were requested later.)
+
+ 3.2 L->LockType is shared IMPLIES for each QUEUED lock M in
+ S->fileLocks, if _a_ in (M->LOffset,+M->LLength) then
+ M->LockType is shared.
+
+ 4. For each LOST lock M in S->fileLocks, M->key != Key(C)
+
+ (Note: If a client loses a lock, it loses all locks.
+ Subsequently, it will not be allowed to obtain any more locks
+ until all existing LOST locks that belong to the client are
+ released. Once all locks are released by a single client,
+ there exists no further contract between the client and AFS
+ about the contents of the file, hence the client can then
+ proceed to obtain new locks and establish a new contract.)
+
+ A client C can only unlock locks L in S->fileLocks which have
+ L->key == Key(C).
+
+ The representation and invariants are as follows:
+
+ - Each cm_scache_t structure keeps:
+
+ - A queue of byte-range locks (cm_scache_t::fileLocks) which
+ are of type cm_file_lock_t.
+
+ - A record of the highest server-side lock that has been
+ obtained for this object (cm_scache_t::serverLock), which is
+ one of (-1), LockRead, LockWrite.
+
+ - A count of ACCEPTED exclusive and shared locks that are in the
+ queue (cm_scache_t::sharedLocks and
+ cm_scache_t::exclusiveLocks)
+
+ - Each cm_file_lock_t structure keeps:
+
+ - The type of lock (cm_file_lock_t::LockType)
+
+ - The key associated with the lock (cm_file_lock_t::key)
+
+ - The offset and length of the lock (cm_file_lock_t::LOffset
+ and cm_file_lock_t::LLength)
+
+ - The state of the lock.
+
+ - Time of issuance or last successful extension
+
+ Semantic invariants:
+
+ I1. The number of ACCEPTED locks in S->fileLocks is
+ (S->sharedLocks + S->exclusiveLocks)
+
+ External invariants:
+
+ I3. S->serverLock is the lock that we have asserted with the
+ AFS file server for this cm_scache_t.
+
+ I4. S->serverLock == LockRead iff there is at least one ACTIVE
+ shared lock, but no ACTIVE exclusive locks.
+
+ I5. S->serverLock == LockWrite iff there is at least one ACTIVE
+ exclusive lock.
+
+ I6. If a WAITUNLOCK lock L exists in S->fileLocks, then all
+ locks that L is waiting on are ahead of L in S->fileLocks.
+
+ I7. If L is a LOST lock, then for each lock M in S->fileLocks,
+ M->key == L->key IMPLIES M is LOST or DELETED.
+
+ --asanka
+ */
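+
+#if 0
+/* Illustrative sketch (not compiled): the calling pattern implied by
+   the description above, mirroring the probe used by the open-path
+   checks earlier in this patch.  A client is identified by a key from
+   cm_GenerateKey(); CM_SESSION_CMINT names the key's session as in
+   those checks, and scp->mx is assumed to be held, as cm_Lock() and
+   cm_Unlock() require. */
+static long cm_ExampleProbeLock(cm_scache_t *scp, cm_user_t *userp,
+                                cm_req_t *reqp)
+{
+    cm_key_t key = cm_GenerateKey(CM_SESSION_CMINT, 0, 0);
+    LARGE_INTEGER LOffset, LLength;
+    long code;
+
+    /* ask for a shared lock on the first byte of the file */
+    LOffset.HighPart = 0;
+    LOffset.LowPart = 0;
+    LLength.HighPart = 0;
+    LLength.LowPart = 1;
+
+    code = cm_Lock(scp, LOCKING_ANDX_SHARED_LOCK, LOffset, LLength,
+                   key, 0 /* don't wait */, userp, reqp, NULL);
+    if (code == 0)
+        code = cm_Unlock(scp, LOCKING_ANDX_SHARED_LOCK, LOffset, LLength,
+                         key, userp, reqp);
+    return code;
+}
+#endif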
+
+#define IS_LOCK_ACTIVE(lockp) (((lockp)->flags & (CM_FILELOCK_FLAG_DELETED|CM_FILELOCK_FLAG_WAITLOCK|CM_FILELOCK_FLAG_WAITUNLOCK|CM_FILELOCK_FLAG_LOST))==0)
+
+#define IS_LOCK_WAITLOCK(lockp) (((lockp)->flags & (CM_FILELOCK_FLAG_DELETED|CM_FILELOCK_FLAG_WAITLOCK|CM_FILELOCK_FLAG_WAITUNLOCK|CM_FILELOCK_FLAG_LOST)) == CM_FILELOCK_FLAG_WAITLOCK)
+
+#define IS_LOCK_WAITUNLOCK(lockp) (((lockp)->flags & (CM_FILELOCK_FLAG_DELETED|CM_FILELOCK_FLAG_WAITLOCK|CM_FILELOCK_FLAG_WAITUNLOCK|CM_FILELOCK_FLAG_LOST)) == CM_FILELOCK_FLAG_WAITUNLOCK)
+
+#define IS_LOCK_LOST(lockp) (((lockp)->flags & (CM_FILELOCK_FLAG_DELETED|CM_FILELOCK_FLAG_LOST)) == CM_FILELOCK_FLAG_LOST)
+
+#define IS_LOCK_DELETED(lockp) (((lockp)->flags & CM_FILELOCK_FLAG_DELETED) == CM_FILELOCK_FLAG_DELETED)
+
+/* the following macros are unsafe (they evaluate their argument more than once) */
+#define IS_LOCK_ACCEPTED(lockp) (IS_LOCK_ACTIVE(lockp) || IS_LOCK_WAITLOCK(lockp))
+
+#define IS_LOCK_QUEUED(lockp) (IS_LOCK_ACTIVE(lockp) || IS_LOCK_WAITLOCK(lockp) || IS_LOCK_WAITUNLOCK(lockp))
+
+#define IS_LOCK_EFFECTIVE(lockp) (IS_LOCK_ACTIVE(lockp) || IS_LOCK_LOST(lockp))
+
+#define IS_LOCK_CLIENTONLY(lockp) (((lockp)->flags & CM_FILELOCK_FLAG_CLIENTONLY) == CM_FILELOCK_FLAG_CLIENTONLY)
+
+#define INTERSECT_RANGE(r1,r2) (((r2).offset+(r2).length) > (r1).offset && ((r1).offset +(r1).length) > (r2).offset)
+
+#define CONTAINS_RANGE(r1,r2) (((r2).offset+(r2).length) <= ((r1).offset+(r1).length) && (r1).offset <= (r2).offset)
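+
+/* For example (illustrative): with r1 = {offset 10, length 10} (bytes
+   10..19) and r2 = {offset 15, length 10} (bytes 15..24),
+   INTERSECT_RANGE(r1,r2) holds since 25 > 10 and 20 > 15, while
+   CONTAINS_RANGE(r1,r2) does not, since 25 > 20. */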
+
+static void cm_LockRangeSubtract(cm_range_t * pos, const cm_range_t * neg)
{
- long code;
- int Which = ((LockType & LOCKING_ANDX_SHARED_LOCK) ? LockRead : LockWrite);
+ afs_int64 int_begin;
+ afs_int64 int_end;
+
+ int_begin = MAX(pos->offset, neg->offset);
+ int_end = MIN(pos->offset+pos->length, neg->offset+neg->length);
+
+ if(int_begin < int_end) {
+ if(int_begin == pos->offset) {
+ pos->length = pos->offset + pos->length - int_end;
+ pos->offset = int_end;
+ } else if(int_end == pos->offset + pos->length) {
+ /* the negative range covers the tail of *pos; keep the head */
+ pos->length = int_begin - pos->offset;
+ }
+ }
+}
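+
+/* Worked example (illustrative): with pos = {offset 0, length 100} and
+   neg = {offset 0, length 10}, the intersection [0,10) touches the head
+   of pos, so pos becomes {offset 10, length 90}.  If instead neg =
+   {offset 40, length 10}, the negative range falls strictly inside pos;
+   the range is not split and pos is left unchanged, which is
+   conservative for the lock checks below (they may re-examine bytes
+   that are already covered, but never skip any). */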
+
+/* Called with scp->mx held. Returns 0 if all is clear to read the
+ specified range by the client identified by key.
+ */
+long cm_LockCheckRead(cm_scache_t *scp,
+ LARGE_INTEGER LOffset,
+ LARGE_INTEGER LLength,
+ cm_key_t key)
+{
+#ifndef ADVISORY_LOCKS
+
+ cm_file_lock_t *fileLock;
+ osi_queue_t *q;
+ long code = 0;
+ cm_range_t range;
+ int substract_ranges = FALSE;
+
+ range.offset = LOffset.QuadPart;
+ range.length = LLength.QuadPart;
+
+ /*
+ 1. for all _a_ in (Offset,+Length), one of the following is true:
+
+ 1.1 There does NOT exist an ACTIVE lock L in S->fileLocks such
+ that _a_ in (L->LOffset,+L->LLength) (IOW: byte _a_ of S is
+ unowned)
+
+ AND
+
+ For each LOST lock M in S->fileLocks such that
+ _a_ in (M->LOffset,+M->LLength), M->LockType is shared AND
+ M->key != Key(C).
+
+ 1.3 There is an ACTIVE lock L in S->fileLocks such that: L->key
+ == Key(C) && _a_ in (L->LOffset,+L->LLength) (IOW: byte _a_
+ of S is owned by C under lock L)
+
+ 1.4 There is an ACTIVE lock L in S->fileLocks such that _a_ in
+ (L->LOffset,+L->LLength) && L->LockType is shared (IOW: byte
+ _a_ of S is shared) AND there is no LOST lock M such that _a_
+ in (M->LOffset,+M->LLength) and M->key == Key(C)
+ */
+
+ lock_ObtainRead(&cm_scacheLock);
+
+ for(q = scp->fileLocksH; q && range.length > 0; q = osi_QNext(q)) {
+ fileLock =
+ (cm_file_lock_t *)((char *) q - offsetof(cm_file_lock_t, fileq));
+
+#if 0
+ if(IS_LOCK_DELETED(fileLock) ||
+ (IS_LOCK_LOST(fileLock) &&
+ fileLock->lockType == LockRead &&
+ fileLock->key != key))
+ continue;
+#endif
+
+ if(INTERSECT_RANGE(range, fileLock->range)) {
+ if(IS_LOCK_ACTIVE(fileLock)) {
+ if(fileLock->key == key) {
+
+ /* if there is an active lock for this client, it
+ is safe to subtract ranges */
+ cm_LockRangeSubtract(&range, &fileLock->range);
+ substract_ranges = TRUE;
+ } else {
+ if(fileLock->lockType != LockRead) {
+ code = CM_ERROR_LOCK_CONFLICT;
+ break;
+ }
+
+ /* even if the entire range is locked for reading,
+ we still can't grant the lock at this point
+ because the client may have lost locks. That
+ is, unless we have already seen an active lock
+ belonging to the client, in which case there
+ can't be any lost locks. */
+ if(substract_ranges)
+ cm_LockRangeSubtract(&range, &fileLock->range);
+ }
+ } else if(IS_LOCK_LOST(fileLock)
+#if 0
+ /* Uncomment for less aggressive handling of
+ lost locks. */
+ &&
+ (fileLock->key == key || fileLock->lockType == LockWrite)
+#endif
+ ) {
+ code = CM_ERROR_BADFD;
+ break;
+ }
+ } else if (IS_LOCK_LOST(fileLock)) {
+ code = CM_ERROR_BADFD;
+ break;
+ }
+ }
+
+ lock_ReleaseRead(&cm_scacheLock);
+
+ return code;
+
+#else
+
+ return 0;
+
+#endif
+}
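+
+#if 0
+/* Illustrative sketch (not compiled): a hypothetical caller gating a
+   read on the byte-range locks tracked above.  The session/process/file
+   ids are placeholders for whatever the SMB or IFS layer uses to
+   identify the client; cm_LockCheckRead() expects scp->mx to be held,
+   as noted in its header comment. */
+static long cm_ExampleCheckedRead(cm_scache_t *scp, LARGE_INTEGER offset,
+                                  unsigned long count,
+                                  unsigned int session_id,
+                                  unsigned long process_id,
+                                  unsigned int file_id)
+{
+    cm_key_t key = cm_GenerateKey(session_id, process_id, file_id);
+    LARGE_INTEGER LLength;
+    long code;
+
+    LLength.HighPart = 0;
+    LLength.LowPart = count;
+
+    lock_ObtainMutex(&scp->mx);
+    code = cm_LockCheckRead(scp, offset, LLength, key);
+    lock_ReleaseMutex(&scp->mx);
+
+    /* code == 0 means every byte in (offset,+count) may be read by this
+       client; otherwise the read should fail with the returned error. */
+    return code;
+}
+#endif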
+
+/* Called with scp->mx held. Returns 0 if all is clear to write the
+ specified range by the client identified by key.
+ */
+long cm_LockCheckWrite(cm_scache_t *scp,
+ LARGE_INTEGER LOffset,
+ LARGE_INTEGER LLength,
+ cm_key_t key)
+{
+#ifndef ADVISORY_LOCKS
+
+ cm_file_lock_t *fileLock;
+ osi_queue_t *q;
+ long code = 0;
+ cm_range_t range;
+
+ range.offset = LOffset.QuadPart;
+ range.length = LLength.QuadPart;
+
+ /*
+ A client C can WRITE range (Offset,+Length) of cm_scache_t S iff:
+
+ 2. for all _a_ in (Offset,+Length), one of the following is true:
+
+ 2.1 Byte _a_ of S is unowned (as above) AND for each LOST lock
+ L in S->fileLocks _a_ NOT in (L->LOffset,+L->LLength).
+
+ 2.2 Byte _a_ of S is owned by C under lock L (as above) AND
+ L->LockType is exclusive.
+
+ */
+
+ lock_ObtainRead(&cm_scacheLock);
+
+ for(q = scp->fileLocksH; q && range.length > 0; q = osi_QNext(q)) {
+ fileLock =
+ (cm_file_lock_t *)((char *) q - offsetof(cm_file_lock_t, fileq));
+
+#if 0
+ if(IS_LOCK_DELETED(fileLock) ||
+ (IS_LOCK_LOST(fileLock) &&
+ fileLock->lockType == LockRead &&
+ fileLock->key != key))
+ continue;
+#endif
+
+ if(INTERSECT_RANGE(range, fileLock->range)) {
+ if(IS_LOCK_ACTIVE(fileLock)) {
+ if(fileLock->key == key) {
+ if(fileLock->lockType == LockWrite) {
+
+ /* if there is an active lock for this client, it
+ is safe to subtract ranges */
+ cm_LockRangeSubtract(&range, &fileLock->range);
+ } else {
+ code = CM_ERROR_LOCK_CONFLICT;
+ break;
+ }
+ } else {
+ code = CM_ERROR_LOCK_CONFLICT;
+ break;
+ }
+ } else if(IS_LOCK_LOST(fileLock)) {
+ code = CM_ERROR_BADFD;
+ break;
+ }
+ } else if (IS_LOCK_LOST(fileLock)) {
+ code = CM_ERROR_BADFD;
+ break;
+ }
+ }
+
+ lock_ReleaseRead(&cm_scacheLock);
+
+ return code;
+
+#else
+
+ return 0;
+
+#endif
+}
+
+/* Forward dcl. */
+static void cm_LockMarkSCacheLost(cm_scache_t * scp);
+
+/* Called with cm_scacheLock write locked */
+static cm_file_lock_t * cm_GetFileLock(void) {
+ cm_file_lock_t * l;
+
+ l = (cm_file_lock_t *) cm_freeFileLocks;
+ if(l) {
+ osi_QRemove(&cm_freeFileLocks, &l->q);
+ } else {
+ l = malloc(sizeof(cm_file_lock_t));
+ osi_assert(l);
+ }
+
+ memset(l, 0, sizeof(cm_file_lock_t));
+
+ return l;
+}
+
+/* Called with cm_scacheLock write locked */
+static void cm_PutFileLock(cm_file_lock_t *l) {
+ osi_QAdd(&cm_freeFileLocks, &l->q);
+}
+
+/* called with scp->mx held */
+long cm_Lock(cm_scache_t *scp, unsigned char sLockType,
+ LARGE_INTEGER LOffset, LARGE_INTEGER LLength,
+ cm_key_t key,
+ int allowWait, cm_user_t *userp, cm_req_t *reqp,
+ cm_file_lock_t **lockpp)
+{
+ long code = 0;
+ int Which = ((sLockType & LOCKING_ANDX_SHARED_LOCK) ? LockRead : LockWrite);
AFSFid tfid;
AFSVolSync volSync;
cm_conn_t *connp;
cm_file_lock_t *fileLock;
osi_queue_t *q;
- int found = 0;
struct rx_connection * callp;
+ cm_range_t range;
+ int wait_unlock = FALSE;
- osi_Log1(afsd_logp, "cm_Lock scp 0x%x ...", (long) scp);
- osi_Log4(afsd_logp, "cm_Lock type 0x%x offset %d length %d timeout %d",
- LockType, (unsigned long)LOffset.QuadPart, (unsigned long)LLength.QuadPart, Timeout);
+ osi_Log4(afsd_logp, "cm_Lock scp 0x%x type 0x%x offset %d length %d",
+ scp, sLockType, (unsigned long)LOffset.QuadPart, (unsigned long)LLength.QuadPart);
+ osi_Log3(afsd_logp, "... allowWait %d key 0x%x:%x", allowWait,
+ (unsigned long)(key >> 32), (unsigned long)(key & 0xffffffff));
- /* Look for a conflict. Also, if we are asking for a shared lock,
- * look for another shared lock, so we don't have to do an RPC.
- */
- q = scp->fileLocks;
- while (q) {
- fileLock = (cm_file_lock_t *)((char *) q - offsetof(cm_file_lock_t, fileq));
- if ((fileLock->flags & (CM_FILELOCK_FLAG_INVALID | CM_FILELOCK_FLAG_WAITING)) == 0) {
- if ((LockType & LOCKING_ANDX_SHARED_LOCK) == 0 ||
- (fileLock->LockType & LOCKING_ANDX_SHARED_LOCK) == 0)
- return CM_ERROR_WOULDBLOCK;
- found = 1;
+ /*
+ A client C can OBTAIN a lock L on cm_scache_t S iff:
+
+ 3. for all _a_ in (L->LOffset,+L->LLength), ALL of the following
+ are true:
+
+ 3.1 L->LockType is exclusive IMPLIES there does NOT exist a QUEUED lock
+ M in S->fileLocks such that _a_ in (M->LOffset,+M->LLength).
+
+ 3.2 L->LockType is shared IMPLIES for each QUEUED lock M in
+ S->fileLocks, if _a_ in (M->LOffset,+M->LLength) then
+ M->LockType is shared.
+
+ 4. For each LOST lock M in S->fileLocks, M->key != Key(C)
+ */
+
+ range.offset = LOffset.QuadPart;
+ range.length = LLength.QuadPart;
+
+ lock_ObtainRead(&cm_scacheLock);
+
+ for(q = scp->fileLocksH; q; q = osi_QNext(q)) {
+ fileLock =
+ (cm_file_lock_t *)((char *) q - offsetof(cm_file_lock_t, fileq));
+
+ if(IS_LOCK_LOST(fileLock) && fileLock->key == key) {
+ code = CM_ERROR_BADFD;
+ break;
+ }
+
+ /* we don't need to check for deleted locks here since deleted
+ locks are dequeued from fileLocks */
+ if(INTERSECT_RANGE(range, fileLock->range)) {
+
+ if((sLockType & LOCKING_ANDX_SHARED_LOCK) == 0 ||
+ fileLock->lockType != LockRead) {
+ wait_unlock = TRUE;
+ code = CM_ERROR_WOULDBLOCK;
+ break;
+ }
}
- q = osi_QNext(q);
}
- osi_Log1(afsd_logp, "cm_Lock found = %d", found);
+ lock_ReleaseRead(&cm_scacheLock);
+
+ if(code == 0 && !(scp->flags & CM_SCACHEFLAG_RO)) {
+ if(Which == scp->serverLock ||
+ (Which == LockRead && scp->serverLock == LockWrite)) {
+
+ /* we already have the lock we need */
+ osi_Log3(afsd_logp, " we already have the correct lock. exclusives[%d], shared[%d], serverLock[%d]",
+ scp->exclusiveLocks, scp->sharedLocks, (int)(signed char) scp->serverLock);
+ code = 0; /* redundant */
+
+ } else if((scp->exclusiveLocks > 0) ||
+ (scp->sharedLocks > 0 && scp->serverLock != LockRead)) {
+
+ /* We are already waiting for some other lock. We should
+ wait for the daemon to catch up instead of generating a
+ flood of SetLock calls. */
+ osi_Log3(afsd_logp, " already waiting for other lock. exclusives[%d], shared[%d], serverLock[%d]",
+ scp->exclusiveLocks, scp->sharedLocks, (int)(signed char) scp->serverLock);
+ code = CM_ERROR_WOULDBLOCK;
+
+ } else {
+ cm_fid_t cfid;
+ int newLock;
+
+ if (scp->serverLock == LockRead && Which == LockWrite) {
+
+ /* We want to escalate the lock to a LockWrite.
+ Unfortunately that's not really possible without
+ letting go of the current lock. But for now we do
+ it anyway. */
+
+ osi_Log0(afsd_logp, " attempting to UPGRADE from LockRead to LockWrite.");
+
+ tfid.Volume = scp->fid.volume;
+ tfid.Vnode = scp->fid.vnode;
+ tfid.Unique = scp->fid.unique;
+ cfid = scp->fid;
+
+ lock_ReleaseMutex(&scp->mx);
+
+ osi_Log1(afsd_logp, "CALL ReleaseLock scp 0x%x", (long) scp);
+
+ do {
+ code = cm_Conn(&cfid, userp, reqp, &connp);
+ if (code)
+ break;
+
+ callp = cm_GetRxConn(connp);
+ code = RXAFS_ReleaseLock(callp, &tfid, &volSync);
+ rx_PutConnection(callp);
+
+ } while (cm_Analyze(connp, userp, reqp, &cfid, &volSync,
+ NULL, NULL, code));
+ code = cm_MapRPCError(code, reqp);
+
+ if (code)
+ osi_Log1(afsd_logp, "CALL ReleaseLock FAILURE, code 0x%x", code);
+ else
+ osi_Log0(afsd_logp, "CALL ReleaseLock SUCCESS");
+
+ lock_ObtainMutex(&scp->mx);
+
+ if (code) {
+ /* We couldn't release the lock */
+ goto check_code;
+ } else {
+ scp->serverLock = -1;
+ }
+ }
+
+ /* We need to obtain a server lock of type Which in order
+ to assert this file lock */
+ tfid.Volume = scp->fid.volume;
+ tfid.Vnode = scp->fid.vnode;
+ tfid.Unique = scp->fid.unique;
+ cfid = scp->fid;
+
+#ifndef AGGRESSIVE_LOCKS
+ newLock = Which;
+#else
+ newLock = LockWrite;
+#endif
+ osi_Log3(afsd_logp, "CALL SetLock scp 0x%x from %d to %d", (long) scp, (int) scp->serverLock, newLock);
+
+ lock_ReleaseMutex(&scp->mx);
+
+ do {
+ code = cm_Conn(&cfid, userp, reqp, &connp);
+ if (code)
+ break;
+
+ callp = cm_GetRxConn(connp);
+ code = RXAFS_SetLock(callp, &tfid, newLock,
+ &volSync);
+ rx_PutConnection(callp);
+
+ } while (cm_Analyze(connp, userp, reqp, &cfid, &volSync,
+ NULL, NULL, code));
+
+ code = cm_MapRPCError(code, reqp);
+
+ if (code) {
+ osi_Log1(afsd_logp, "CALL SetLock FAILURE, code 0x%x", code);
+ } else {
+ osi_Log0(afsd_logp, "CALL SetLock SUCCESS");
+ }
+
+ if (code == CM_ERROR_WOULDBLOCK && newLock != Which) {
+ /* we wanted LockRead. We tried LockWrite. Now try LockRead again */
+ newLock = Which;
+
+ /* am I sane? */
+ osi_assert(newLock == LockRead);
+
+ osi_Log3(afsd_logp, "CALL SetLock AGAIN scp 0x%x from %d to %d",
+ (long) scp, (int) scp->serverLock, newLock);
+
+ do {
+ code = cm_Conn(&cfid, userp, reqp, &connp);
+ if (code)
+ break;
+
+ callp = cm_GetRxConn(connp);
+ code = RXAFS_SetLock(callp, &tfid, newLock,
+ &volSync);
+ rx_PutConnection(callp);
+
+ } while (cm_Analyze(connp, userp, reqp, &cfid, &volSync,
+ NULL, NULL, code));
+
+ code = cm_MapRPCError(code, reqp);
+
+ if (code) {
+ osi_Log1(afsd_logp, "CALL SetLock FAILURE AGAIN, code 0x%x", code);
+ } else {
+ osi_Log0(afsd_logp, "CALL SetLock SUCCESS");
+ }
+ }
+
+ lock_ObtainMutex(&scp->mx);
+
+ if(code == 0)
+ scp->serverLock = newLock;
+ else {
+ if ((scp->sharedLocks > 0 || scp->exclusiveLocks > 0) &&
+ scp->serverLock == -1) {
+ /* Oops. We lost the lock. */
+ cm_LockMarkSCacheLost(scp);
+ }
+ }
+ }
+ } else if (scp->flags & CM_SCACHEFLAG_RO) {
+ osi_Log0(afsd_logp, " Skipping server lock for RO scp");
+ }
+
+ check_code:
+
+ if (code != 0) {
+ /* Special case error translations
+
+ Applications don't expect certain errors from a
+ LockFile/UnlockFile call. We need to translate some error
+ codes into codes that apps expect and handle. */
+
+ /* We shouldn't actually need to handle this case since we
+ simulate locks for RO scps anyway. */
+ if (code == CM_ERROR_READONLY) {
+ osi_Log0(afsd_logp, " Reinterpreting CM_ERROR_READONLY as CM_ERROR_NOACCESS");
+ code = CM_ERROR_NOACCESS;
+ }
+ }
+
+ if (code == 0 || (code == CM_ERROR_WOULDBLOCK && allowWait)) {
+
+ lock_ObtainWrite(&cm_scacheLock);
+ fileLock = cm_GetFileLock();
+ lock_ReleaseWrite(&cm_scacheLock);
+#ifdef DEBUG
+ fileLock->fid = scp->fid;
+#endif
+ fileLock->key = key;
+ fileLock->lockType = Which;
+ cm_HoldUser(userp);
+ fileLock->userp = userp;
+ fileLock->range = range;
+ fileLock->flags = (code == 0 ? 0 :
+ ((wait_unlock)?
+ CM_FILELOCK_FLAG_WAITUNLOCK :
+ CM_FILELOCK_FLAG_WAITLOCK));
+ if (scp->flags & CM_SCACHEFLAG_RO)
+ fileLock->flags |= CM_FILELOCK_FLAG_CLIENTONLY;
+
+ fileLock->lastUpdate = (code == 0) ? time(NULL) : 0;
+
+ osi_QAddT(&scp->fileLocksH, &scp->fileLocksT, &fileLock->fileq);
+
+ lock_ObtainWrite(&cm_scacheLock);
+ cm_HoldSCacheNoLock(scp);
+ fileLock->scp = scp;
+ osi_QAdd(&cm_allFileLocks, &fileLock->q);
+ lock_ReleaseWrite(&cm_scacheLock);
+
+ if (code != 0) {
+ *lockpp = fileLock;
+ }
+
+ if (IS_LOCK_ACCEPTED(fileLock)) {
+ if(Which == LockRead)
+ scp->sharedLocks++;
+ else
+ scp->exclusiveLocks++;
+ }
+
+ osi_Log1(afsd_logp, "cm_Lock Lock added 0x%x", (long) fileLock);
+ osi_Log4(afsd_logp, " scp[0x%x] exclusives[%d] shared[%d] serverLock[%d]",
+ scp, scp->exclusiveLocks, scp->sharedLocks, (int)(signed char) scp->serverLock);
+ }
+
+ return code;
+}
+
+static int cm_KeyEquals(cm_key_t k1, cm_key_t k2, int flags);
+
+/* Called with scp->mx held */
+long cm_UnlockByKey(cm_scache_t * scp,
+ cm_key_t key,
+ int flags,
+ cm_user_t * userp,
+ cm_req_t * reqp)
+{
+ long code = 0;
+ AFSFid tfid;
+ AFSVolSync volSync;
+ cm_conn_t *connp;
+ cm_file_lock_t *fileLock;
+ osi_queue_t *q, *qn;
+ struct rx_connection * callp;
+ int n_unlocks = 0;
+
+ osi_Log3(afsd_logp, "cm_UnlockByKey scp 0x%x key 0x%x:%x",
+ (long) scp, (unsigned long)(key >> 32), (unsigned long)(key & 0xffffffff));
+
+ lock_ObtainWrite(&cm_scacheLock);
+
+ for(q = scp->fileLocksH; q; q = qn) {
+ qn = osi_QNext(q);
+
+ fileLock = (cm_file_lock_t *)
+ ((char *) q - offsetof(cm_file_lock_t, fileq));
+
+#ifdef DEBUG
+ osi_Log4(afsd_logp, " Checking lock[0x%x] range[%d,+%d] type[%d]",
+ fileLock, (unsigned long) fileLock->range.offset, (unsigned long) fileLock->range.length,
+ fileLock->lockType);
+ osi_Log3(afsd_logp, " key[0x%x:%x] flags[0x%x]",
+ (unsigned long)(fileLock->key >> 32),
+ (unsigned long)(fileLock->key & 0xffffffff),
+ fileLock->flags);
+
+ if(cm_FidCmp(&fileLock->fid, &fileLock->scp->fid)) {
+ osi_Log0(afsd_logp, "!!fileLock->fid != scp->fid");
+ osi_Log4(afsd_logp, " fileLock->fid(cell=[%d], volume=[%d], vnode=[%d], unique=[%d]",
+ fileLock->fid.cell,
+ fileLock->fid.volume,
+ fileLock->fid.vnode,
+ fileLock->fid.unique);
+ osi_Log4(afsd_logp, " scp->fid(cell=[%d], volume=[%d], vnode=[%d], unique=[%d]",
+ fileLock->scp->fid.cell,
+ fileLock->scp->fid.volume,
+ fileLock->scp->fid.vnode,
+ fileLock->scp->fid.unique);
+ osi_assert(FALSE);
+ }
+#endif
+
+ if (!IS_LOCK_DELETED(fileLock) &&
+ cm_KeyEquals(fileLock->key, key, flags)) {
+ osi_Log3(afsd_logp, "...Unlock range [%d,+%d] type %d",
+ fileLock->range.offset,
+ fileLock->range.length,
+ fileLock->lockType);
+
+ if (scp->fileLocksT == q)
+ scp->fileLocksT = osi_QPrev(q);
+ osi_QRemove(&scp->fileLocksH,q);
+
+ if(IS_LOCK_ACCEPTED(fileLock)) {
+ if(fileLock->lockType == LockRead)
+ scp->sharedLocks--;
+ else
+ scp->exclusiveLocks--;
+ }
+
+ fileLock->flags |= CM_FILELOCK_FLAG_DELETED;
+
+ cm_ReleaseUser(fileLock->userp);
+ cm_ReleaseSCacheNoLock(scp);
+
+ fileLock->userp = NULL;
+ fileLock->scp = NULL;
+
+ n_unlocks++;
+ }
+ }
+
+ lock_ReleaseWrite(&cm_scacheLock);
+
+ if(n_unlocks == 0) {
+ osi_Log0(afsd_logp, "cm_UnlockByKey no locks found");
+ osi_Log3(afsd_logp, " Leaving scp with exclusives[%d], shared[%d], serverLock[%d]",
+ scp->exclusiveLocks, scp->sharedLocks, (int)(signed char) scp->serverLock);
+
+ return 0;
+ }
+
+ osi_Log1(afsd_logp, "cm_UnlockByKey done with %d locks", n_unlocks);
+
+ osi_assertx(scp->sharedLocks >= 0, "scp->sharedLocks < 0");
+ osi_assertx(scp->exclusiveLocks >= 0, "scp->exclusiveLocks < 0");
+
+ if (scp->flags & CM_SCACHEFLAG_RO) {
+ osi_Log0(afsd_logp, " Skipping server lock for RO scp");
+ goto done;
+ }
+
+ /* Ideally we would go through the rest of the locks to determine
+ * if one or more locks that were formerly in WAITUNLOCK can now
+ * be put to ACTIVE or WAITLOCK and update scp->exclusiveLocks and
+ * scp->sharedLocks accordingly. However, the retrying of locks
+ * in that manner is done manually in cm_RetryLock().
+ */
+
+ if (scp->serverLock == LockWrite && scp->exclusiveLocks == 0 && scp->sharedLocks > 0) {
+
+ cm_fid_t cfid;
+
+ /* The serverLock should be downgraded to LockRead */
+ osi_Log0(afsd_logp, " DOWNGRADE lock from LockWrite to LockRead");
- if (found)
- code = 0;
- else {
tfid.Volume = scp->fid.volume;
tfid.Vnode = scp->fid.vnode;
tfid.Unique = scp->fid.unique;
+ cfid = scp->fid;
+
lock_ReleaseMutex(&scp->mx);
+
+ osi_Log1(afsd_logp, "CALL ReleaseLock scp 0x%x", (long) scp);
+
do {
- osi_Log1(afsd_logp, "CALL SetLock scp 0x%x", (long) scp);
- code = cm_Conn(&scp->fid, userp, reqp, &connp);
+ code = cm_Conn(&cfid, userp, reqp, &connp);
if (code)
break;
callp = cm_GetRxConn(connp);
- code = RXAFS_SetLock(callp, &tfid, Which,
- &volSync);
+ code = RXAFS_ReleaseLock(callp, &tfid, &volSync);
rx_PutConnection(callp);
+
+ } while (cm_Analyze(connp, userp, reqp, &cfid, &volSync,
+ NULL, NULL, code));
+ code = cm_MapRPCError(code, reqp);
- } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync,
- NULL, NULL, code));
if (code)
- osi_Log1(afsd_logp, "CALL SetLock FAILURE, code 0x%x", code);
+ osi_Log1(afsd_logp, "CALL ReleaseLock FAILURE, code 0x%x", code);
else
+ osi_Log0(afsd_logp, "CALL ReleaseLock SUCCESS");
+
+ lock_ObtainMutex(&scp->mx);
+
+ if (code) {
+ /* so we couldn't release it. Just let the lock be for now */
+ code = 0;
+ goto done;
+ } else {
+ scp->serverLock = -1;
+ }
+
+ tfid.Volume = scp->fid.volume;
+ tfid.Vnode = scp->fid.vnode;
+ tfid.Unique = scp->fid.unique;
+ cfid = scp->fid;
+
+ osi_Log3(afsd_logp, "CALL SetLock scp 0x%x from %d to %d", (long) scp, (int) scp->serverLock, LockRead);
+
+ lock_ReleaseMutex(&scp->mx);
+
+ do {
+
+ code = cm_Conn(&cfid, userp, reqp, &connp);
+ if (code)
+ break;
+
+ callp = cm_GetRxConn(connp);
+ code = RXAFS_SetLock(callp, &tfid, LockRead,
+ &volSync);
+
+ rx_PutConnection(callp);
+
+ } while (cm_Analyze(connp, userp, reqp, &cfid, &volSync,
+ NULL, NULL, code));
+
+ if (code)
+ osi_Log1(afsd_logp, "CALL SetLock FAILURE, code 0x%x", code);
+ else {
osi_Log0(afsd_logp, "CALL SetLock SUCCESS");
+ }
+
lock_ObtainMutex(&scp->mx);
+
+ if(code == 0)
+ scp->serverLock = LockRead;
+ else {
+ if ((scp->sharedLocks > 0 || scp->exclusiveLocks > 0) &&
+ (scp->serverLock == -1)) {
+ /* Oopsie */
+ cm_LockMarkSCacheLost(scp);
+ }
+ }
+
+ /* failure here has no bearing on the return value of
+ cm_UnlockByKey() */
+ code = 0;
+
+ } else if(scp->serverLock != (-1) && scp->exclusiveLocks == 0 && scp->sharedLocks == 0) {
+ cm_fid_t cfid;
+
+ /* The serverLock should be released entirely */
+
+ tfid.Volume = scp->fid.volume;
+ tfid.Vnode = scp->fid.vnode;
+ tfid.Unique = scp->fid.unique;
+ cfid = scp->fid;
+
+ lock_ReleaseMutex(&scp->mx);
+
+ osi_Log1(afsd_logp, "CALL ReleaseLock scp 0x%x", (long) scp);
+
+ do {
+ code = cm_Conn(&cfid, userp, reqp, &connp);
+ if (code)
+ break;
+
+ callp = cm_GetRxConn(connp);
+ code = RXAFS_ReleaseLock(callp, &tfid, &volSync);
+ rx_PutConnection(callp);
+
+ } while (cm_Analyze(connp, userp, reqp, &cfid, &volSync,
+ NULL, NULL, code));
code = cm_MapRPCError(code, reqp);
- }
- if (code == 0 || Timeout != 0) {
- fileLock = malloc(sizeof(cm_file_lock_t));
- fileLock->LockType = LockType;
- cm_HoldUser(userp);
- fileLock->userp = userp;
- fileLock->fid = scp->fid;
- fileLock->LOffset = LOffset;
- fileLock->LLength = LLength;
- fileLock->flags = (code == 0 ? 0 : CM_FILELOCK_FLAG_WAITING);
- osi_QAdd(&scp->fileLocks, &fileLock->fileq);
- lock_ObtainWrite(&cm_scacheLock);
- osi_QAdd(&cm_allFileLocks, &fileLock->q);
- lock_ReleaseWrite(&cm_scacheLock);
- if (code != 0)
- *lockpp = fileLock;
- osi_Log1(afsd_logp, "cm_Lock Lock added 0x%x", (long) fileLock);
+ if (code)
+ osi_Log1(afsd_logp, "CALL ReleaseLock FAILURE, code 0x%x", code);
+ else
+ osi_Log0(afsd_logp, "CALL ReleaseLock SUCCESS");
+
+ lock_ObtainMutex(&scp->mx);
+
+ if (code == 0)
+ scp->serverLock = (-1);
}
+
+ done:
+
+ osi_Log1(afsd_logp, "cm_UnlockByKey code 0x%x", code);
+ osi_Log3(afsd_logp, " Leaving scp with exclusives[%d], shared[%d], serverLock[%d]",
+ scp->exclusiveLocks, scp->sharedLocks, (int)(signed char) scp->serverLock);
+
return code;
}
-long cm_Unlock(cm_scache_t *scp, unsigned char LockType,
- LARGE_INTEGER LOffset, LARGE_INTEGER LLength,
- cm_user_t *userp, cm_req_t *reqp)
+long cm_Unlock(cm_scache_t *scp,
+ unsigned char sLockType,
+ LARGE_INTEGER LOffset, LARGE_INTEGER LLength,
+ cm_key_t key,
+ cm_user_t *userp,
+ cm_req_t *reqp)
{
long code = 0;
- int Which = ((LockType & LOCKING_ANDX_SHARED_LOCK) ? LockRead : LockWrite);
+ int Which = ((sLockType & LOCKING_ANDX_SHARED_LOCK) ? LockRead : LockWrite);
AFSFid tfid;
AFSVolSync volSync;
cm_conn_t *connp;
- cm_file_lock_t *fileLock, *ourLock;
- osi_queue_t *q, *qq;
- int anotherReader = 0;
- int smallLock = 0;
- int found = 0;
+ cm_file_lock_t *fileLock;
+ osi_queue_t *q;
+ int release_userp = FALSE;
struct rx_connection * callp;
osi_Log4(afsd_logp, "cm_Unlock scp 0x%x type 0x%x offset %d length %d",
- (long) scp, LockType, (unsigned long)LOffset.QuadPart, (unsigned long)LLength.QuadPart);
+ (long) scp, sLockType, (unsigned long)LOffset.QuadPart, (unsigned long)LLength.QuadPart);
+ osi_Log2(afsd_logp, "... key 0x%x:%x",
+ (unsigned long) (key >> 32), (unsigned long) (key & 0xffffffff));
- if (LargeIntegerLessThan(LLength, scp->length))
- smallLock = 1;
+ lock_ObtainRead(&cm_scacheLock);
- /* Look for our own lock on the list, so as to remove it.
- * Also, determine if we're the last reader; if not, avoid an RPC.
- */
- q = scp->fileLocks;
- while (q) {
+ for(q = scp->fileLocksH; q; q = osi_QNext(q)) {
fileLock = (cm_file_lock_t *)
((char *) q - offsetof(cm_file_lock_t, fileq));
- if (!found
- && fileLock->userp == userp
- && LargeIntegerEqualTo(fileLock->LOffset, LOffset)
- && LargeIntegerEqualTo(fileLock->LLength, LLength)) {
- found = 1;
- ourLock = fileLock;
- qq = q;
- }
- else if (fileLock->LockType & LOCKING_ANDX_SHARED_LOCK)
- anotherReader = 1;
- q = osi_QNext(q);
- }
- /* ignore byte ranges */
- if (smallLock && !found) {
- osi_Log0(afsd_logp, "cm_Unlock lock not found and ignored");
- return 0;
+#ifdef DEBUG
+ if(cm_FidCmp(&fileLock->fid, &fileLock->scp->fid)) {
+ osi_Log0(afsd_logp, "!!fileLock->fid != scp->fid");
+ osi_Log4(afsd_logp, " fileLock->fid(cell=[%d], volume=[%d], vnode=[%d], unique=[%d]",
+ fileLock->fid.cell,
+ fileLock->fid.volume,
+ fileLock->fid.vnode,
+ fileLock->fid.unique);
+ osi_Log4(afsd_logp, " scp->fid(cell=[%d], volume=[%d], vnode=[%d], unique=[%d]",
+ fileLock->scp->fid.cell,
+ fileLock->scp->fid.volume,
+ fileLock->scp->fid.vnode,
+ fileLock->scp->fid.unique);
+ osi_assert(FALSE);
+ }
+#endif
+ if (!IS_LOCK_DELETED(fileLock) &&
+ fileLock->key == key &&
+ fileLock->range.offset == LOffset.QuadPart &&
+ fileLock->range.length == LLength.QuadPart) {
+ break;
+ }
}
- /* don't try to unlock other people's locks */
- if (!found) {
+ if(!q) {
osi_Log0(afsd_logp, "cm_Unlock lock not found; failure");
- return CM_ERROR_WOULDBLOCK;
+
+ lock_ReleaseRead(&cm_scacheLock);
+
+ return CM_ERROR_WOULDBLOCK; /* how is this an appropriate error code? */
}
/* discard lock record */
- osi_QRemove(&scp->fileLocks, qq);
+ if (scp->fileLocksT == q)
+ scp->fileLocksT = osi_QPrev(q);
+ osi_QRemove(&scp->fileLocksH, q);
+
+ if(IS_LOCK_ACCEPTED(fileLock)) {
+ if(fileLock->lockType == LockRead)
+ scp->sharedLocks--;
+ else
+ scp->exclusiveLocks--;
+ }
+
+ lock_ReleaseRead(&cm_scacheLock);
+
/*
* Don't delete it here; let the daemon delete it, to simplify
* the daemon's traversal of the list.
*/
+
lock_ObtainWrite(&cm_scacheLock);
- ourLock->flags |= CM_FILELOCK_FLAG_INVALID;
- cm_ReleaseUser(ourLock->userp);
+ fileLock->flags |= CM_FILELOCK_FLAG_DELETED;
+ if (userp != NULL) {
+ cm_ReleaseUser(fileLock->userp);
+ } else {
+ userp = fileLock->userp;
+ release_userp = TRUE;
+ }
+ fileLock->userp = NULL;
+ cm_ReleaseSCacheNoLock(scp);
+ fileLock->scp = NULL;
lock_ReleaseWrite(&cm_scacheLock);
- if (!anotherReader) {
+ if (scp->flags & CM_SCACHEFLAG_RO) {
+ osi_Log0(afsd_logp, " Skipping server locks for RO scp");
+ goto done;
+ }
+
+ /* Ideally we would go through the rest of the locks to determine
+ * if one or more locks that were formerly in WAITUNLOCK can now
+ * be put to ACTIVE or WAITLOCK and update scp->exclusiveLocks and
+ * scp->sharedLocks accordingly. However, the retrying of locks
+ * in that manner is done manually in cm_RetryLock().
+ */
+
+ if (scp->serverLock == LockWrite && scp->exclusiveLocks == 0 && scp->sharedLocks > 0) {
+
+ cm_fid_t cfid;
+
+ /* The serverLock should be downgraded to LockRead */
+ osi_Log0(afsd_logp, " DOWNGRADE lock from LockWrite to LockRead");
+
tfid.Volume = scp->fid.volume;
tfid.Vnode = scp->fid.vnode;
tfid.Unique = scp->fid.unique;
+ cfid = scp->fid;
+
lock_ReleaseMutex(&scp->mx);
+
osi_Log1(afsd_logp, "CALL ReleaseLock scp 0x%x", (long) scp);
+
do {
- code = cm_Conn(&scp->fid, userp, reqp, &connp);
+ code = cm_Conn(&cfid, userp, reqp, &connp);
if (code)
break;
callp = cm_GetRxConn(connp);
code = RXAFS_ReleaseLock(callp, &tfid, &volSync);
rx_PutConnection(callp);
+
+ } while (cm_Analyze(connp, userp, reqp, &cfid, &volSync,
+ NULL, NULL, code));
- } while (cm_Analyze(connp, userp, reqp, &scp->fid, &volSync,
- NULL, NULL, code));
code = cm_MapRPCError(code, reqp);
if (code)
osi_Log1(afsd_logp, "CALL ReleaseLock FAILURE, code 0x%x", code);
else
osi_Log0(afsd_logp, "CALL ReleaseLock SUCCESS");
+
+ lock_ObtainMutex(&scp->mx);
+
+ if (code) {
+ /* so we couldn't release it. Just let the lock be for now */
+ code = 0;
+ goto done;
+ } else {
+ scp->serverLock = -1;
+ }
+
+ tfid.Volume = scp->fid.volume;
+ tfid.Vnode = scp->fid.vnode;
+ tfid.Unique = scp->fid.unique;
+ cfid = scp->fid;
+
+ osi_Log3(afsd_logp, "CALL SetLock scp 0x%x from %d to %d", (long) scp, (int) scp->serverLock, LockRead);
+
+ lock_ReleaseMutex(&scp->mx);
+
+ do {
+
+ code = cm_Conn(&cfid, userp, reqp, &connp);
+ if (code)
+ break;
+
+ callp = cm_GetRxConn(connp);
+ code = RXAFS_SetLock(callp, &tfid, LockRead,
+ &volSync);
+
+ rx_PutConnection(callp);
+
+ } while (cm_Analyze(connp, userp, reqp, &cfid, &volSync,
+ NULL, NULL, code));
+
+ if (code)
+ osi_Log1(afsd_logp, "CALL SetLock FAILURE, code 0x%x", code);
+ else {
+ osi_Log0(afsd_logp, "CALL SetLock SUCCESS");
+ }
lock_ObtainMutex(&scp->mx);
+
+ if(code == 0)
+ scp->serverLock = LockRead;
+ else {
+ if ((scp->sharedLocks > 0 || scp->exclusiveLocks > 0) &&
+ (scp->serverLock == -1)) {
+ /* Oopsie */
+ cm_LockMarkSCacheLost(scp);
+ }
+ }
+
+ /* failure here has no bearing on the return value of
+ cm_Unlock() */
+ code = 0;
+
+ } else if(scp->serverLock != (-1) && scp->exclusiveLocks == 0 && scp->sharedLocks == 0) {
+ cm_fid_t cfid;
+
+ /* The serverLock should be released entirely */
+
+ tfid.Volume = scp->fid.volume;
+ tfid.Vnode = scp->fid.vnode;
+ tfid.Unique = scp->fid.unique;
+ cfid = scp->fid;
+
+ lock_ReleaseMutex(&scp->mx);
+
+ osi_Log1(afsd_logp, "CALL ReleaseLock scp 0x%x", (long) scp);
+
+ do {
+ code = cm_Conn(&cfid, userp, reqp, &connp);
+ if (code)
+ break;
+
+ callp = cm_GetRxConn(connp);
+ code = RXAFS_ReleaseLock(callp, &tfid, &volSync);
+ rx_PutConnection(callp);
+
+ } while (cm_Analyze(connp, userp, reqp, &cfid, &volSync,
+ NULL, NULL, code));
+ code = cm_MapRPCError(code, reqp);
+
+ if (code)
+ osi_Log1(afsd_logp, "CALL ReleaseLock FAILURE, code 0x%x", code);
+ else
+ osi_Log0(afsd_logp, "CALL ReleaseLock SUCCESS");
+
+ lock_ObtainMutex(&scp->mx);
+
+ if (code == 0) {
+ scp->serverLock = (-1);
+ }
}
+ if (release_userp)
+ cm_ReleaseUser(userp);
+
+ done:
+
osi_Log1(afsd_logp, "cm_Unlock code 0x%x", code);
+ osi_Log3(afsd_logp, " Leaving scp with exclusives[%d], shared[%d], serverLock[%d]",
+ scp->exclusiveLocks, scp->sharedLocks, (int)(signed char) scp->serverLock);
+
return code;
}
+/* called with scp->mx held */
+static void cm_LockMarkSCacheLost(cm_scache_t * scp)
+{
+ cm_file_lock_t *fileLock;
+ osi_queue_t *q;
+
+ /* cm_scacheLock needed because we are modifying
+ fileLock->flags */
+ lock_ObtainWrite(&cm_scacheLock);
+
+ for(q = scp->fileLocksH; q; q = osi_QNext(q)) {
+ fileLock =
+ (cm_file_lock_t *)((char *) q - offsetof(cm_file_lock_t, fileq));
+
+ if(IS_LOCK_ACTIVE(fileLock)) {
+ fileLock->flags |= CM_FILELOCK_FLAG_LOST;
+ }
+ }
+
+ scp->serverLock = -1;
+ lock_ReleaseWrite(&cm_scacheLock);
+}
+
+/* Called with no relevant locks held */
void cm_CheckLocks()
{
osi_queue_t *q, *nq;
cm_conn_t *connp;
long code;
struct rx_connection * callp;
-
- cm_InitReq(&req);
+ cm_scache_t * scp;
lock_ObtainWrite(&cm_scacheLock);
- q = cm_allFileLocks;
- while (q) {
+
+ cm_lockRefreshCycle++;
+
+ osi_Log1(afsd_logp, "cm_CheckLocks starting lock check cycle %d", cm_lockRefreshCycle);
+
+ for(q = cm_allFileLocks; q; q = nq) {
fileLock = (cm_file_lock_t *) q;
+
nq = osi_QNext(q);
- if (fileLock->flags & CM_FILELOCK_FLAG_INVALID) {
+
+ if (IS_LOCK_DELETED(fileLock)) {
+
osi_QRemove(&cm_allFileLocks, q);
- free(fileLock);
- }
- else if (!(fileLock->flags & CM_FILELOCK_FLAG_WAITING)) {
- tfid.Volume = fileLock->fid.volume;
- tfid.Vnode = fileLock->fid.vnode;
- tfid.Unique = fileLock->fid.unique;
- lock_ReleaseWrite(&cm_scacheLock);
- osi_Log1(afsd_logp, "CALL ExtendLock lock 0x%x", (long) fileLock);
- do {
- code = cm_Conn(&fileLock->fid, fileLock->userp,
- &req, &connp);
- if (code)
- break;
+ cm_PutFileLock(fileLock);
- callp = cm_GetRxConn(connp);
- code = RXAFS_ExtendLock(callp, &tfid,
- &volSync);
- rx_PutConnection(callp);
+ } else if (IS_LOCK_ACTIVE(fileLock) && !IS_LOCK_CLIENTONLY(fileLock)) {
- } while (cm_Analyze(connp, fileLock->userp, &req,
- &fileLock->fid, &volSync, NULL, NULL,
- code));
- code = cm_MapRPCError(code, &req);
- if (code)
- osi_Log1(afsd_logp, "CALL ExtendLock FAILURE, code 0x%x", code);
- else
- osi_Log0(afsd_logp, "CALL ExtendLock SUCCESS");
+ scp = fileLock->scp;
+ osi_assert(scp != NULL);
+ cm_HoldSCacheNoLock(scp);
+
+#ifdef DEBUG
+ if(cm_FidCmp(&fileLock->fid, &fileLock->scp->fid)) {
+ osi_Log0(afsd_logp, "!!fileLock->fid != scp->fid");
+ osi_Log4(afsd_logp, " fileLock->fid(cell=[%d], volume=[%d], vnode=[%d], unique=[%d]",
+ fileLock->fid.cell,
+ fileLock->fid.volume,
+ fileLock->fid.vnode,
+ fileLock->fid.unique);
+ osi_Log4(afsd_logp, " scp->fid(cell=[%d], volume=[%d], vnode=[%d], unique=[%d]",
+ fileLock->scp->fid.cell,
+ fileLock->scp->fid.volume,
+ fileLock->scp->fid.vnode,
+ fileLock->scp->fid.unique);
+ osi_assert(FALSE);
+ }
+#endif
+ /* Server locks are extended once per scp per refresh
+ cycle. */
+ if (scp->lastRefreshCycle != cm_lockRefreshCycle) {
+
+ int scp_done = FALSE;
+
+ osi_Log1(afsd_logp, "cm_CheckLocks Updating scp 0x%x", scp);
- lock_ObtainWrite(&cm_scacheLock);
+ lock_ReleaseWrite(&cm_scacheLock);
+ lock_ObtainMutex(&scp->mx);
+
+ /* did the lock change while we weren't holding the lock? */
+ if (!IS_LOCK_ACTIVE(fileLock))
+ goto post_syncopdone;
+
+ code = cm_SyncOp(scp, NULL, fileLock->userp, &req, 0,
+ CM_SCACHESYNC_NEEDCALLBACK
+ | CM_SCACHESYNC_GETSTATUS
+ | CM_SCACHESYNC_LOCK);
+
+ if (code) {
+ osi_Log1(smb_logp, "cm_CheckLocks SyncOp failure code 0x%x", code);
+ goto post_syncopdone;
+ }
+
+ /* cm_SyncOp releases scp->mx during which the lock
+ may get released. */
+ if (!IS_LOCK_ACTIVE(fileLock))
+ goto pre_syncopdone;
+
+ if(scp->serverLock != -1) {
+ cm_fid_t cfid;
+ cm_user_t * userp;
+
+ cm_InitReq(&req);
+
+ tfid.Volume = scp->fid.volume;
+ tfid.Vnode = scp->fid.vnode;
+ tfid.Unique = scp->fid.unique;
+ cfid = scp->fid;
+ userp = fileLock->userp;
+
+ osi_Log3(afsd_logp, "CALL ExtendLock lock 0x%x for scp=0x%x with lock %d",
+ (long) fileLock,
+ (long) scp,
+ (int) scp->serverLock);
+
+ lock_ReleaseMutex(&scp->mx);
+
+ do {
+ code = cm_Conn(&cfid, userp,
+ &req, &connp);
+ if (code)
+ break;
+
+ callp = cm_GetRxConn(connp);
+ code = RXAFS_ExtendLock(callp, &tfid,
+ &volSync);
+ rx_PutConnection(callp);
+
+ osi_Log1(afsd_logp, " ExtendLock returns %d", code);
+
+ } while (cm_Analyze(connp, userp, &req,
+ &cfid, &volSync, NULL, NULL,
+ code));
+
+ code = cm_MapRPCError(code, &req);
+
+ lock_ObtainMutex(&scp->mx);
+
+ if (code) {
+ osi_Log1(afsd_logp, "CALL ExtendLock FAILURE, code 0x%x", code);
+ cm_LockMarkSCacheLost(scp);
+ } else {
+ osi_Log0(afsd_logp, "CALL ExtendLock SUCCESS");
+ }
+ } else {
+ /* interestingly, we have found an active lock
+ belonging to an scache that has no
+ serverLock */
+ cm_LockMarkSCacheLost(scp);
+ }
+
+ scp_done = TRUE;
+
+ pre_syncopdone:
+
+ cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_LOCK);
+
+ post_syncopdone:
+ lock_ReleaseMutex(&scp->mx);
+
+ lock_ObtainWrite(&cm_scacheLock);
+
+ if (code == 0) {
+ fileLock->lastUpdate = time(NULL);
+ }
+
+ if (scp_done)
+ scp->lastRefreshCycle = cm_lockRefreshCycle;
+
+ } else {
+ /* we have already refreshed the locks on this scp */
+ fileLock->lastUpdate = time(NULL);
+ }
+
+ cm_ReleaseSCacheNoLock(scp);
+
+ } else if (IS_LOCK_ACTIVE(fileLock) && IS_LOCK_CLIENTONLY(fileLock)) {
+ /* TODO: Check callbacks */
}
- q = nq;
}
+
lock_ReleaseWrite(&cm_scacheLock);
-}
+}
-long cm_RetryLock(cm_file_lock_t *oldFileLock, int vcp_is_dead)
+/* NOT called with scp->mx held. */
+long cm_RetryLock(cm_file_lock_t *oldFileLock, int client_is_dead)
{
- long code;
- int Which = ((oldFileLock->LockType & LOCKING_ANDX_SHARED_LOCK) ? LockRead : LockWrite);
+ long code = 0;
cm_scache_t *scp;
AFSFid tfid;
AFSVolSync volSync;
cm_file_lock_t *fileLock;
osi_queue_t *q;
cm_req_t req;
- int found = 0;
struct rx_connection * callp;
+ int newLock = -1;
- if (vcp_is_dead) {
+ if (client_is_dead) {
code = CM_ERROR_TIMEDOUT;
goto handleCode;
}
- cm_InitReq(&req);
+ lock_ObtainRead(&cm_scacheLock);
- /* Look for a conflict. Also, if we are asking for a shared lock,
- * look for another shared lock, so we don't have to do an RPC.
- */
- code = cm_GetSCache(&oldFileLock->fid, &scp, oldFileLock->userp, &req);
- if (code)
- return code;
+ /* if the lock has already been granted, then we have nothing to do */
+ if(IS_LOCK_ACTIVE(oldFileLock)) {
+ lock_ReleaseRead(&cm_scacheLock);
+ return 0;
+ }
- q = scp->fileLocks;
- while (q) {
- fileLock = (cm_file_lock_t *)
- ((char *) q - offsetof(cm_file_lock_t, fileq));
- if ((fileLock->flags &
- (CM_FILELOCK_FLAG_INVALID | CM_FILELOCK_FLAG_WAITING))
- == 0) {
- if ((oldFileLock->LockType & LOCKING_ANDX_SHARED_LOCK) == 0
- || (fileLock->LockType & LOCKING_ANDX_SHARED_LOCK) == 0) {
- cm_ReleaseSCache(scp);
- return CM_ERROR_WOULDBLOCK;
+ /* we can't do anything with lost or deleted locks at the moment. */
+ if(IS_LOCK_LOST(oldFileLock) || IS_LOCK_DELETED(oldFileLock)) {
+ code = CM_ERROR_BADFD;
+ lock_ReleaseRead(&cm_scacheLock);
+ goto handleCode;
+ }
+
+ scp = oldFileLock->scp;
+
+ osi_assert(scp != NULL);
+
+ lock_ReleaseRead(&cm_scacheLock);
+ lock_ObtainMutex(&scp->mx);
+ lock_ObtainWrite(&cm_scacheLock);
+
+ /* Check if we already have a sufficient server lock to allow this
+ lock to go through */
+ if(IS_LOCK_WAITLOCK(oldFileLock) &&
+ (scp->serverLock == oldFileLock->lockType ||
+ scp->serverLock == LockWrite)) {
+
+ oldFileLock->flags &= ~CM_FILELOCK_FLAG_WAITLOCK;
+
+ lock_ReleaseWrite(&cm_scacheLock);
+ lock_ReleaseMutex(&scp->mx);
+
+ return 0;
+ }
+
+ if(IS_LOCK_WAITUNLOCK(oldFileLock)) {
+
+ /* check if the conflicting locks have already disappeared */
+
+ for(q = scp->fileLocksH; q; q = osi_QNext(q)) {
+
+ fileLock = (cm_file_lock_t *)
+ ((char *) q - offsetof(cm_file_lock_t, fileq));
+
+ /* an oldFileLock can only depend on locks that are ahead
+ of it in the queue. If we came this far, then all
+ should be ok */
+ if(fileLock == oldFileLock) {
+ break;
+ }
+
+ if(IS_LOCK_LOST(fileLock)
+#if 0
+ && fileLock->key == oldFileLock->key
+#endif
+ ) {
+ code = CM_ERROR_BADFD;
+ oldFileLock->flags |= CM_FILELOCK_FLAG_LOST;
+ break;
+ }
+
+ /* we don't need to check for deleted locks here since deleted
+ locks are dequeued from fileLocks */
+ if(INTERSECT_RANGE(oldFileLock->range, fileLock->range)) {
+
+ if(oldFileLock->lockType != LockRead ||
+ fileLock->lockType != LockRead) {
+ code = CM_ERROR_WOULDBLOCK;
+ break;
+ }
}
- found = 1;
}
- q = osi_QNext(q);
}
- if (found)
- code = 0;
- else {
- tfid.Volume = oldFileLock->fid.volume;
- tfid.Vnode = oldFileLock->fid.vnode;
- tfid.Unique = oldFileLock->fid.unique;
+ if (code != 0) {
+ lock_ReleaseWrite(&cm_scacheLock);
+ lock_ReleaseMutex(&scp->mx);
+
+ goto handleCode;
+ }
+
+ /* when we get here, the lock is either a WAITUNLOCK or WAITLOCK.
+ If it is WAITUNLOCK, then we didn't find any conflicting lock
+ but we haven't verified whether the serverLock is sufficient to
+ assert it. If it is WAITLOCK, then the serverLock is
+ insufficient to assert it. Either way, we are ready to accept
+ the lock as either ACTIVE or WAITLOCK depending on the
+ serverLock. */
+
+ oldFileLock->flags &= ~CM_FILELOCK_FLAG_WAITUNLOCK;
+
+ if (scp->serverLock == oldFileLock->lockType ||
+ (oldFileLock->lockType == LockRead &&
+ scp->serverLock == LockWrite)) {
+
+ oldFileLock->flags &= ~CM_FILELOCK_FLAG_WAITLOCK;
+
+ lock_ReleaseWrite(&cm_scacheLock);
+ lock_ReleaseMutex(&scp->mx);
+
+ return 0;
+
+ } else {
+ cm_fid_t cfid;
+ cm_user_t * userp;
+
+ oldFileLock->flags |= CM_FILELOCK_FLAG_WAITLOCK;
+
+ cm_InitReq(&req);
+
+ code = cm_SyncOp(scp, NULL, oldFileLock->userp, &req, 0,
+ CM_SCACHESYNC_NEEDCALLBACK
+ | CM_SCACHESYNC_GETSTATUS
+ | CM_SCACHESYNC_LOCK);
+ if (code) {
+ osi_Log1(smb_logp, "cm_RetryLock SyncOp failure code 0x%x", code);
+ lock_ReleaseWrite(&cm_scacheLock);
+ goto post_syncopdone;
+ }
+
+ if(!IS_LOCK_WAITLOCK(oldFileLock))
+ goto pre_syncopdone;
+
+ tfid.Volume = scp->fid.volume;
+ tfid.Vnode = scp->fid.vnode;
+ tfid.Unique = scp->fid.unique;
+ cfid = scp->fid;
+ userp = oldFileLock->userp;
+
+#ifndef AGGRESSIVE_LOCKS
+ newLock = oldFileLock->lockType;
+#else
+ newLock = LockWrite;
+#endif
+
osi_Log1(afsd_logp, "CALL SetLock lock 0x%x", (long) oldFileLock);
+
+ lock_ReleaseWrite(&cm_scacheLock);
+ lock_ReleaseMutex(&scp->mx);
+
do {
- code = cm_Conn(&oldFileLock->fid, oldFileLock->userp,
- &req, &connp);
+ code = cm_Conn(&cfid, userp, &req, &connp);
if (code)
break;
callp = cm_GetRxConn(connp);
- code = RXAFS_SetLock(callp, &tfid, Which,
+ code = RXAFS_SetLock(callp, &tfid, newLock,
&volSync);
rx_PutConnection(callp);
- } while (cm_Analyze(connp, oldFileLock->userp, &req,
- &oldFileLock->fid, &volSync,
+ } while (cm_Analyze(connp, userp, &req,
+ &cfid, &volSync,
NULL, NULL, code));
code = cm_MapRPCError(code, &req);
- if (code)
+ if (code) {
osi_Log1(afsd_logp, "CALL SetLock FAILURE, code 0x%x", code);
- else
+ } else {
osi_Log0(afsd_logp, "CALL SetLock SUCCESS");
+ }
+
+ lock_ObtainMutex(&scp->mx);
+ pre_syncopdone:
+ cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_LOCK);
+ post_syncopdone:
+ ;
}
handleCode:
if (code != 0 && code != CM_ERROR_WOULDBLOCK) {
- lock_ObtainMutex(&scp->mx);
- osi_QRemove(&scp->fileLocks, &oldFileLock->fileq);
- lock_ReleaseMutex(&scp->mx);
+ if (scp->fileLocksT == &oldFileLock->fileq)
+ scp->fileLocksT = osi_QPrev(&oldFileLock->fileq);
+ osi_QRemove(&scp->fileLocksH, &oldFileLock->fileq);
+ } else if (code == 0 && IS_LOCK_WAITLOCK(oldFileLock)) {
+ scp->serverLock = newLock;
}
+ lock_ReleaseMutex(&scp->mx);
+
lock_ObtainWrite(&cm_scacheLock);
- if (code == 0)
- oldFileLock->flags = 0;
- else if (code != CM_ERROR_WOULDBLOCK) {
- oldFileLock->flags |= CM_FILELOCK_FLAG_INVALID;
+ if (code == 0) {
+ oldFileLock->flags &= ~CM_FILELOCK_FLAG_WAITLOCK;
+ } else if (code != CM_ERROR_WOULDBLOCK) {
+ oldFileLock->flags |= CM_FILELOCK_FLAG_DELETED;
cm_ReleaseUser(oldFileLock->userp);
oldFileLock->userp = NULL;
+ cm_ReleaseSCacheNoLock(scp);
+ oldFileLock->scp = NULL;
}
lock_ReleaseWrite(&cm_scacheLock);
return code;
}
+
+cm_key_t cm_GenerateKey(unsigned int session_id, unsigned long process_id, unsigned int file_id)
+{
+ return (((cm_key_t) process_id) << 32) |
+ (((cm_key_t) session_id) << 16) |
+ (((cm_key_t) file_id));
+}
+
+static int cm_KeyEquals(cm_key_t k1, cm_key_t k2, int flags)
+{
+ if (flags & CM_UNLOCK_BY_FID) {
+ return ((k1 & 0xffffffff) == (k2 & 0xffffffff));
+ } else {
+ return (k1 == k2);
+ }
+}
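+
+/* Worked example (illustrative): cm_GenerateKey(2, 0x1234, 7) packs its
+   arguments as (0x1234 << 32) | (2 << 16) | 7 == 0x0000123400020007,
+   i.e. the process id goes in the high 32 bits and the session and
+   file ids in the low 32.  Under CM_UNLOCK_BY_FID, cm_KeyEquals()
+   compares only the low 32 bits, so keys differing only in process id
+   match. */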
return inp;
}
-/*DEBUG do not checkin*/
void OutputDebugF(char * format, ...) {
va_list args;
int len;
OutputDebugString(buf);
}
}
-/**/
#define SMB_EXT_SEC_PACKAGE_NAME "Negotiate"
+
void smb_NegotiateExtendedSecurity(void ** secBlob, int * secBlobLength) {
SECURITY_STATUS status, istatus;
CredHandle creds = {0,0};
&creds,
&expiry);
- if (status != SEC_E_OK) {
+ if (status != SEC_E_OK) {
/* Really bad. We return an empty security blob */
OutputDebugF("AcquireCredentialsHandle failed with %lX", status);
goto nes_0;
return 0;
}
-/* When using SMB auth, all SMB sessions have to pass through here first to
- * authenticate the user.
- * Caveat: If not use the SMB auth the protocol does not require sending a
- * session setup packet, which means that we can't rely on a UID in subsequent
- * packets. Though in practice we get one anyway.
+/* When using SMB auth, all SMB sessions have to pass through here
+ * first to authenticate the user.
+ *
+ * Caveat: If not using SMB auth, the protocol does not require
+ * sending a session setup packet, which means that we can't rely on a
+ * UID in subsequent packets. Though in practice we get one anyway.
*/
long smb_ReceiveV3SessionSetupX(smb_vc_t *vcp, smb_packet_t *inp, smb_packet_t *outp)
{
char requestFileName[1024] = "";
smb_tran2Packet_t *outp = 0;
cm_user_t *userp = 0;
- cm_scache_t *scp;
cm_req_t req;
CPINFO CodePageInfo;
int i, nbnLen, reqLen;
scp = NULL;
- extraInfo = (smb_GetSMBParm(inp, 2) & 1); /* return extra info */
- openFun = smb_GetSMBParm(inp, 8); /* open function */
+ extraInfo = (smb_GetSMBParm(inp, 2) & 1); /* return extra info */
+ openFun = smb_GetSMBParm(inp, 8); /* open function */
excl = ((openFun & 3) == 0);
- trunc = ((openFun & 3) == 2); /* truncate it */
+ trunc = ((openFun & 3) == 2); /* truncate it */
openMode = (smb_GetSMBParm(inp, 3) & 0x7);
- openAction = 0; /* tracks what we did */
+ openAction = 0; /* tracks what we did */
attributes = smb_GetSMBParm(inp, 5);
dosTime = smb_GetSMBParm(inp, 6) | (smb_GetSMBParm(inp, 7) << 16);
- /* compute initial mode bits based on read-only flag in attributes */
+ /* compute initial mode bits based on read-only flag in attributes */
initialModeBits = 0666;
if (attributes & 1) initialModeBits &= ~0222;
return 0;
}
-/* The file locking code is incomplete and that which is implemented in cm_Lock()
- * is broken. What exists functions only because it is rarely if ever called.
- * The tests activated by FULL_LOCKS_ONLY ensure that cm_Lock() is only called
- * if the lock covers the entire file. Therefore, RXAFS_SetLock is only called
- * rarely. That means that AFS locks are ignored by Windows clients.
- * When cm_Lock is re-written, undefine or better yet remove, the FULL_LOCKS_ONLY
- * code.
- */
-#define FULL_LOCKS_ONLY
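+/* Parse one LOCKING_ANDX lock range at *buf, returning the owner PID
+ and the byte range, and advance *buf past the entry (20 bytes for
+ large-file ranges, 10 bytes otherwise). */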
+static void smb_GetLockParams(unsigned char LockType,
+ char ** buf,
+ unsigned int * ppid,
+ LARGE_INTEGER * pOffset,
+ LARGE_INTEGER * pLength)
+{
+ if (LockType & LOCKING_ANDX_LARGE_FILES) {
+ /* Large Files */
+ *ppid = *((USHORT *) *buf);
+ pOffset->HighPart = *((LONG *)(*buf + 4));
+ pOffset->LowPart = *((DWORD *)(*buf + 8));
+ pLength->HighPart = *((LONG *)(*buf + 12));
+ pLength->LowPart = *((DWORD *)(*buf + 16));
+ *buf += 20;
+ }
+ else {
+ /* Not Large Files */
+ *ppid = *((USHORT *) *buf);
+ pOffset->HighPart = 0;
+ pOffset->LowPart = *((DWORD *)(*buf + 2));
+ pLength->HighPart = 0;
+ pLength->LowPart = *((DWORD *)(*buf + 6));
+ *buf += 10;
+ }
+}
+
long smb_ReceiveV3LockingX(smb_vc_t *vcp, smb_packet_t *inp, smb_packet_t *outp)
{
cm_req_t req;
cm_scache_t *scp;
unsigned char LockType;
unsigned short NumberOfUnlocks, NumberOfLocks;
- unsigned long Timeout;
+ long Timeout;
char *op;
+ char *op_locks;
LARGE_INTEGER LOffset, LLength;
- smb_waitingLock_t *waitingLock;
- void *lockp;
+ smb_waitingLockRequest_t *wlRequest = NULL;
+ cm_file_lock_t *lockp;
long code = 0;
int i;
+ cm_key_t key;
+ unsigned int pid;
cm_InitReq(&req);
NumberOfUnlocks = smb_GetSMBParm(inp, 6);
NumberOfLocks = smb_GetSMBParm(inp, 7);
+ if ((LockType & LOCKING_ANDX_CANCEL_LOCK) ||
+ (LockType & LOCKING_ANDX_CHANGE_LOCKTYPE)) {
+
+ /* We don't support these requests. Apparently it is also safe
+ to ignore them. */
+ osi_Log1(smb_logp, "smb_ReceiveV3Locking received unsupported request [%s]",
+ ((LockType & LOCKING_ANDX_CANCEL_LOCK)?
+ "LOCKING_ANDX_CANCEL_LOCK":
+ "LOCKING_ANDX_CHANGE_LOCKTYPE"));
+ /* No need to call osi_LogSaveString since these are string
+ constants.*/
+
+ code = CM_ERROR_BADOP;
+ goto done;
+
+ }
+
op = smb_GetSMBData(inp, NULL);
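+
+ /* Process the unlock ranges first. Each range carries its own PID,
+ which together with the VC session ID and the fid forms the lock
+ key. */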
for (i=0; i<NumberOfUnlocks; i++) {
- if (LockType & LOCKING_ANDX_LARGE_FILES) {
- /* Large Files */
- LOffset.HighPart = *((LONG *)(op + 4));
- LOffset.LowPart = *((DWORD *)(op + 8));
- LLength.HighPart = *((LONG *)(op + 12));
- LLength.LowPart = *((DWORD *)(op + 16));
- op += 20;
- }
- else {
- /* Not Large Files */
- LOffset.HighPart = 0;
- LOffset.LowPart = *((DWORD *)(op + 2));
- LLength.HighPart = 0;
- LLength.LowPart = *((DWORD *)(op + 6));
- op += 10;
- }
-#ifdef FULL_LOCKS_ONLY
- if (LargeIntegerNotEqualToZero(LOffset)) {
- osi_Log2(smb_logp, "smb_ReceiveV3Locking Unlock %d offset 0x%x != Zero",
- i, (long)LOffset.QuadPart);
- continue;
- }
-#endif /* FULL_LOCKS_ONLY */
- /* Do not check length -- length check done in cm_Unlock */
+ smb_GetLockParams(LockType, &op, &pid, &LOffset, &LLength);
- code = cm_Unlock(scp, LockType, LOffset, LLength, userp, &req);
- if (code) goto done;
- }
+ key = cm_GenerateKey(vcp->vcID, pid, fidp->fid);
+
+ code = cm_Unlock(scp, LockType, LOffset, LLength, key, userp, &req);
+
+ if (code)
+ goto done;
+ }
+
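+ /* Remember where the lock list starts; if a later lock blocks, the
+ entries that were already granted are re-parsed from here when
+ building the waiting lock request. */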
+ op_locks = op;
for (i=0; i<NumberOfLocks; i++) {
- if (LockType & LOCKING_ANDX_LARGE_FILES) {
- /* Large Files */
- LOffset.HighPart = *((LONG *)(op + 4));
- LOffset.LowPart = *((DWORD *)(op + 8));
- LLength.HighPart = *((LONG *)(op + 12));
- LLength.LowPart = *((DWORD *)(op + 16));
- op += 20;
- }
- else {
- /* Not Large Files */
- LOffset.HighPart = 0;
- LOffset.LowPart = *((DWORD *)(op + 2));
- LLength.HighPart = 0;
- LLength.LowPart = *((DWORD *)(op + 6));
- op += 10;
- }
-#ifdef FULL_LOCKS_ONLY
- if (LargeIntegerNotEqualToZero(LOffset)) {
- osi_Log2(smb_logp, "smb_ReceiveV3Locking Lock %d offset 0x%x != Zero",
- i, (long)LOffset.QuadPart);
- continue;
- }
- if (LargeIntegerLessThan(LOffset, scp->length)) {
- osi_Log3(smb_logp, "smb_ReceiveV3Locking Unlock %d offset 0x%x < 0x%x",
- i, (long)LOffset.QuadPart, (long)scp->length.QuadPart);
- continue;
- }
-#endif /* FULL_LOCKS_ONLY */
- code = cm_Lock(scp, LockType, LOffset, LLength, Timeout,
+ smb_GetLockParams(LockType, &op, &pid, &LOffset, &LLength);
+
+ key = cm_GenerateKey(vcp->vcID, pid, fidp->fid);
+
+ code = cm_Lock(scp, LockType, LOffset, LLength, key, (Timeout != 0),
userp, &req, &lockp);
+
if (code == CM_ERROR_WOULDBLOCK && Timeout != 0) {
+ smb_waitingLock_t * wLock;
+
/* Put on waiting list */
- waitingLock = malloc(sizeof(smb_waitingLock_t));
- waitingLock->vcp = vcp;
- smb_HoldVC(vcp);
- waitingLock->inp = smb_CopyPacket(inp);
- waitingLock->outp = smb_CopyPacket(outp);
- waitingLock->timeRemaining = Timeout;
- waitingLock->lockp = lockp;
- lock_ObtainWrite(&smb_globalLock);
- osi_QAdd((osi_queue_t **)&smb_allWaitingLocks,
- &waitingLock->q);
- osi_Wakeup((long) &smb_allWaitingLocks);
- lock_ReleaseWrite(&smb_globalLock);
- /* don't send reply immediately */
- outp->flags |= SMB_PACKETFLAG_NOSEND;
+ if (wlRequest == NULL) {
+ int j;
+ char * opt;
+ cm_key_t tkey;
+ LARGE_INTEGER tOffset, tLength;
+
+ wlRequest = malloc(sizeof(smb_waitingLockRequest_t));
+
+ osi_assert(wlRequest != NULL);
+
+ wlRequest->vcp = vcp;
+ smb_HoldVC(vcp);
+ wlRequest->scp = scp;
+ cm_HoldSCache(scp);
+ wlRequest->inp = smb_CopyPacket(inp);
+ wlRequest->outp = smb_CopyPacket(outp);
+ wlRequest->lockType = LockType;
+ wlRequest->timeRemaining = Timeout;
+ wlRequest->locks = NULL;
+
+ /* The waiting lock request needs to have enough
+ information to undo all the locks in the request.
+ We do the following to store info about locks that
+ have already been granted. Sure, we can get most
+ of the info from the packet, but the packet doesn't
+ hold the result of the cm_Lock call. In practice we
+ only receive packets with one or two locks, so we
+ are only wasting a few bytes here and there and
+ only for a limited period of time until the waiting
+ lock times out or is freed. */
+
+ for (opt = op_locks, j = i; j > 0; j--) {
+ smb_GetLockParams(LockType, &opt, &pid, &tOffset, &tLength);
+
+ tkey = cm_GenerateKey(vcp->vcID, pid, fidp->fid);
+
+ wLock = malloc(sizeof(smb_waitingLock_t));
+
+ osi_assert(wLock != NULL);
+
+ wLock->key = tkey;
+ wLock->LOffset = tOffset;
+ wLock->LLength = tLength;
+ wLock->lockp = NULL;
+ wLock->state = SMB_WAITINGLOCKSTATE_DONE;
+ osi_QAdd((osi_queue_t **) &wlRequest->locks,
+ &wLock->q);
+ }
+ }
+
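+ /* Now record the lock that just returned CM_ERROR_WOULDBLOCK;
+ unlike the entries queued above it is marked WAITING rather
+ than DONE. */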
+ wLock = malloc(sizeof(smb_waitingLock_t));
+
+ osi_assert(wLock != NULL);
+
+ wLock->key = key;
+ wLock->LOffset = LOffset;
+ wLock->LLength = LLength;
+ wLock->lockp = lockp;
+ wLock->state = SMB_WAITINGLOCKSTATE_WAITING;
+ osi_QAdd((osi_queue_t **) &wlRequest->locks,
+ &wLock->q);
+
osi_Log1(smb_logp, "smb_ReceiveV3Locking WaitingLock created 0x%x",
- (long) waitingLock);
+ (long) wLock);
+
+ code = 0;
continue;
}
+
if (code) {
osi_Log1(smb_logp, "smb_ReceiveV3Locking cm_Lock failure code 0x%x", code);
break;
}
- }
+ }
if (code) {
- /* release any locks acquired before the failure */
- osi_Log0(smb_logp, "smb_ReceiveV3Locking - failure; should be releasing locks but don't!!!!");
- }
- else
+
+ /* Since something went wrong with lock number i, we now have
+ to release any locks acquired before the failure. All locks
+ before lock number i (there are i of them) either succeeded
+ or are waiting.
+ Either case requires calling cm_Unlock(). */
+
+ /* And purge the waiting lock */
+ if (wlRequest != NULL) {
+ smb_waitingLock_t * wl;
+ smb_waitingLock_t * wlNext;
+ long ul_code;
+
+ for (wl = wlRequest->locks; wl; wl = wlNext) {
+
+ wlNext = (smb_waitingLock_t *) osi_QNext(&wl->q);
+
+ ul_code = cm_Unlock(scp, LockType, wl->LOffset, wl->LLength, wl->key, userp, &req);
+
+ if (ul_code != 0) {
+ osi_Log1(smb_logp, "smb_ReceiveV3Locking cm_Unlock returns code %d", ul_code);
+ } else {
+ osi_Log0(smb_logp, "smb_ReceiveV3Locking cm_Unlock successful");
+ }
+
+ osi_QRemove((osi_queue_t **) &wlRequest->locks, &wl->q);
+ free(wl);
+
+ }
+
+ smb_ReleaseVC(wlRequest->vcp);
+ cm_ReleaseSCache(wlRequest->scp);
+ smb_FreePacket(wlRequest->inp);
+ smb_FreePacket(wlRequest->outp);
+
+ free(wlRequest);
+
+ wlRequest = NULL;
+ }
+
+ } else {
+
+ if (wlRequest != NULL) {
+
+ lock_ObtainWrite(&smb_globalLock);
+ osi_QAdd((osi_queue_t **)&smb_allWaitingLocks,
+ &wlRequest->q);
+ osi_Wakeup((long) &smb_allWaitingLocks);
+ lock_ReleaseWrite(&smb_globalLock);
+
+ /* don't send reply immediately */
+ outp->flags |= SMB_PACKETFLAG_NOSEND;
+ }
+
smb_SetSMBDataLength(outp, 0);
+ }
+
done:
cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_LOCK);
+
doneSync:
lock_ReleaseMutex(&scp->mx);
cm_ReleaseUser(userp);
long smb_ReceiveV3ReadX(smb_vc_t *vcp, smb_packet_t *inp, smb_packet_t *outp)
{
osi_hyper_t offset;
- long count, finalCount;
+ long count;
+ long finalCount = 0;
unsigned short fd;
+ unsigned int pid;
smb_fid_t *fidp;
long code = 0;
cm_user_t *userp;
+ cm_key_t key;
char *op;
fd = smb_GetSMBParm(inp, 2);
if (!fidp) {
return CM_ERROR_BADFD;
}
+
+ pid = ((smb_t *) inp)->pid;
+ key = cm_GenerateKey(vcp->vcID, pid, fd);
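+
+ /* Check the requested byte range against the locks currently held
+ on the file; if cm_LockCheckRead reports a conflict the read is
+ rejected below. */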
+ {
+ LARGE_INTEGER LOffset, LLength;
+
+ LOffset.HighPart = offset.HighPart;
+ LOffset.LowPart = offset.LowPart;
+ LLength.HighPart = 0;
+ LLength.LowPart = count;
+
+ lock_ObtainMutex(&fidp->scp->mx);
+ code = cm_LockCheckRead(fidp->scp, LOffset, LLength, key);
+ lock_ReleaseMutex(&fidp->scp->mx);
+ }
+
+ if (code) {
+ smb_ReleaseFID(fidp);
+ return code;
+ }
+
/* set inp->fid so that later read calls in same msg can find fid */
inp->fid = fd;
if (fidp->flags & SMB_FID_IOCTL) {
return smb_IoctlV3Read(fidp, vcp, inp, outp);
}
-
+
userp = smb_GetUser(vcp, inp);
/* 0 and 1 are reserved for request chaining, were setup by our caller,
unsigned int extAttributes;
unsigned int createDisp;
unsigned int createOptions;
+ unsigned int shareAccess;
int initialModeBits;
unsigned short baseFid;
smb_fid_t *baseFidp;
| (smb_GetSMBOffsetParm(inp, 8, 1) << 16);
extAttributes = smb_GetSMBOffsetParm(inp, 13, 1)
| (smb_GetSMBOffsetParm(inp, 14, 1) << 16);
+ shareAccess = smb_GetSMBOffsetParm(inp, 15, 1)
+ | (smb_GetSMBOffsetParm(inp, 16, 1) << 16);
createDisp = smb_GetSMBOffsetParm(inp, 17, 1)
| (smb_GetSMBOffsetParm(inp, 18, 1) << 16);
createOptions = smb_GetSMBOffsetParm(inp, 19, 1)
* extended attributes
*/
initialModeBits = 0666;
- if (extAttributes & 1)
+ if (extAttributes & SMB_ATTR_READONLY)
initialModeBits &= ~0222;
pathp = smb_GetSMBData(inp, NULL);
osi_Log1(smb_logp,"NTCreateX for [%s]",osi_LogSaveString(smb_logp,realPathp));
osi_Log4(smb_logp,"... da=[%x] ea=[%x] cd=[%x] co=[%x]", desiredAccess, extAttributes, createDisp, createOptions);
- osi_Log2(smb_logp,"... flags=[%x] lastNamep=[%s]", flags, osi_LogSaveString(smb_logp,(lastNamep?lastNamep:"null")));
+ osi_Log3(smb_logp,"... share=[%x] flags=[%x] lastNamep=[%s]", shareAccess, flags, osi_LogSaveString(smb_logp,(lastNamep?lastNamep:"null")));
if (lastNamep && strcmp(lastNamep, SMB_IOCTL_FILENAME) == 0) {
/* special case magic file name for receiving IOCTL requests
free(hexp);
}
#endif
+
userp = smb_GetUser(vcp, inp);
if (!userp) {
osi_Log1(smb_logp, "NTCreateX Invalid user [%d]", ((smb_t *) inp)->uid);
if (createOptions & FILE_DELETE_ON_CLOSE)
fidflags |= SMB_FID_DELONCLOSE;
+ /* and the share mode */
+ if (shareAccess & FILE_SHARE_READ)
+ fidflags |= SMB_FID_SHARE_READ;
+ if (shareAccess & FILE_SHARE_WRITE)
+ fidflags |= SMB_FID_SHARE_WRITE;
+
code = 0;
/* For an exclusive create, we want to do a case sensitive match for the last component. */
/* we have scp but not dscp */
if (baseFid != 0)
smb_ReleaseFID(baseFidp);
- }
+ }
/* if we get here, if code is 0, the file exists and is represented by
* scp. Otherwise, we have to create it. The dir may be represented
if ( createDisp == FILE_OVERWRITE ||
createDisp == FILE_OVERWRITE_IF) {
+
setAttr.mask = CM_ATTRMASK_LENGTH;
setAttr.length.LowPart = 0;
setAttr.length.HighPart = 0;
/* open the file itself */
fidp = smb_FindFID(vcp, 0, SMB_FLAG_CREATE);
osi_assert(fidp);
+
+ /* If we are restricting sharing, we should do so with a suitable
+ share lock. */
+ if (scp->fileType == CM_SCACHETYPE_FILE &&
+ !(fidflags & SMB_FID_SHARE_WRITE)) {
+ cm_key_t key;
+ LARGE_INTEGER LOffset, LLength;
+ int sLockType;
+
+ LOffset.HighPart = SMB_FID_QLOCK_HIGH;
+ LOffset.LowPart = SMB_FID_QLOCK_LOW;
+ LLength.HighPart = 0;
+ LLength.LowPart = SMB_FID_QLOCK_LENGTH;
+
+ if (fidflags & SMB_FID_SHARE_READ) {
+ sLockType = LOCKING_ANDX_SHARED_LOCK;
+ } else {
+ sLockType = 0;
+ }
+
+ key = cm_GenerateKey(vcp->vcID, SMB_FID_QLOCK_PID, fidp->fid);
+
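+ /* Take a lock over the fid quasi-lock range: exclusive if both
+ read and write sharing are denied, shared if only read sharing
+ is allowed. If the lock cannot be granted, an existing open
+ holds a conflicting share mode and we fail the open with a
+ sharing violation. */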
+ lock_ObtainMutex(&scp->mx);
+ code = cm_Lock(scp, sLockType, LOffset, LLength, key, 0, userp, &req, NULL);
+ lock_ReleaseMutex(&scp->mx);
+
+ if (code) {
+ fidp->flags = SMB_FID_DELETE;
+ smb_ReleaseFID(fidp);
+
+ cm_ReleaseSCache(scp);
+ if (dscp)
+ cm_ReleaseSCache(dscp);
+ cm_ReleaseUser(userp);
+ free(realPathp);
+
+ return CM_ERROR_SHARING_VIOLATION;
+ }
+ }
+
/* save a pointer to the vnode */
fidp->scp = scp; /* Hold transfered to fidp->scp and no longer needed */
cm_ReleaseSCache(dscp);
dscp = NULL;
}
+
cm_Open(scp, 0, userp);
/* set inp->fid so that later read calls in same msg can find fid */
cm_ReleaseUser(userp);
- /* Can't free realPathp if we get here since fidp->NTopen_wholepathp is pointing there */
+ /* Can't free realPathp if we get here since
+ fidp->NTopen_wholepathp is pointing there */
/* leave scp held since we put it in fidp->scp */
return 0;
unsigned int desiredAccess;
#ifdef DEBUG_VERBOSE
unsigned int allocSize;
- unsigned int shareAccess;
#endif
+ unsigned int shareAccess;
unsigned int extAttributes;
unsigned int createDisp;
#ifdef DEBUG_VERBOSE
allocSize = lparmp[3];
#endif /* DEBUG_VERBOSE */
extAttributes = lparmp[5];
-#ifdef DEBUG_VEROSE
shareAccess = lparmp[6];
-#endif
createDisp = lparmp[7];
createOptions = lparmp[8];
#ifdef DEBUG_VERBOSE
if (createOptions & FILE_DELETE_ON_CLOSE)
fidflags |= SMB_FID_DELONCLOSE;
+ /* And the share mode */
+ if (shareAccess & FILE_SHARE_READ)
+ fidflags |= SMB_FID_SHARE_READ;
+ if (shareAccess & FILE_SHARE_WRITE)
+ fidflags |= SMB_FID_SHARE_WRITE;
+
dscp = NULL;
code = 0;
if ( createDisp == FILE_OPEN ||
fidp = smb_FindFID(vcp, 0, SMB_FLAG_CREATE);
osi_assert(fidp);
+ /* If we are restricting sharing, we should do so with a suitable
+ share lock. */
+ if (scp->fileType == CM_SCACHETYPE_FILE &&
+ !(fidflags & SMB_FID_SHARE_WRITE)) {
+ cm_key_t key;
+ LARGE_INTEGER LOffset, LLength;
+ int sLockType;
+
+ LOffset.HighPart = SMB_FID_QLOCK_HIGH;
+ LOffset.LowPart = SMB_FID_QLOCK_LOW;
+ LLength.HighPart = 0;
+ LLength.LowPart = SMB_FID_QLOCK_LENGTH;
+
+ if (fidflags & SMB_FID_SHARE_READ) {
+ sLockType = LOCKING_ANDX_SHARED_LOCK;
+ } else {
+ sLockType = 0;
+ }
+
+ key = cm_GenerateKey(vcp->vcID, SMB_FID_QLOCK_PID, fidp->fid);
+
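+ /* Same quasi-lock based share check as in the NTCreateX path
+ above. */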
+ lock_ObtainMutex(&scp->mx);
+ code = cm_Lock(scp, sLockType, LOffset, LLength, key, 0, userp, &req, NULL);
+ lock_ReleaseMutex(&scp->mx);
+
+ if (code) {
+ fidp->flags = SMB_FID_DELETE;
+ smb_ReleaseFID(fidp);
+
+ cm_ReleaseSCache(scp);
+ cm_ReleaseUser(userp);
+ free(realPathp);
+
+ return CM_ERROR_SHARING_VIOLATION;
+ }
+ }
+
/* save a pointer to the vnode */
fidp->scp = scp;