2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include <afs/param.h>
24 * This next lock controls access to all cm_aclent structures in the system,
25 * in either the free list or in the LRU queue. A read lock prevents someone
26 * from modifying the list(s), and a write lock is required for modifying
27 * the list. The data stored in the randomUid and randomAccess fields
28 * is maintained as up-to-date or not via the scache lock.
29 * An aclent structure is free if it has no back vnode pointer.
31 osi_rwlock_t cm_aclLock; /* lock for system's aclents */
33 /* This must be called with cm_aclLock and the aclp->back->mx held */
/*
 * Detach an acl cache entry from the vnode (aclp->backp) that owns it,
 * drop the cached user reference, and clear the cached rights so the
 * entry can be recycled for another user/vnode pair.
 */
34 static void CleanupACLEnt(cm_aclent_t * aclp)
40 if (aclp->backp->randomACLp) {
42 * Remove the entry from the vnode's list
/* caller must already hold the back vnode's rw lock for writing */
44 lock_AssertWrite(&aclp->backp->rw);
45 laclpp = &aclp->backp->randomACLp;
/* walk the vnode's singly linked randomACLp chain looking for aclp */
46 for (taclp = *laclpp; taclp; laclpp = &taclp->nextp, taclp = *laclpp) {
/* NOTE(review): panic path -- presumably reached when aclp is not found
 * on its own vnode's list (list inconsistency); confirm against the
 * loop-exit test, which is not on the visible lines */
51 osi_panic("CleanupACLEnt race", __FILE__, __LINE__);
52 *laclpp = aclp->nextp; /* remove from vnode list */
57 /* release the old user */
59 cm_ReleaseUser(aclp->userp);
/* reset cached rights and token lifetime for the next occupant */
63 aclp->randomAccess = 0;
64 aclp->tgtLifetime = 0;
68 * Get an acl cache entry for a particular user and file, or return that it doesn't exist.
69 * Called with the scp write locked.
/*
 * On a hit the cached rights are returned through *rightsp and the entry
 * is moved to the head of the global LRU queue; an entry whose backing
 * token has expired is pushed to the tail instead, making it the first
 * candidate for reuse.
 */
71 long cm_FindACLCache(cm_scache_t *scp, cm_user_t *userp, afs_uint32 *rightsp)
75 time_t now = time(NULL);
77 lock_AssertWrite(&scp->rw);
78 lock_ObtainWrite(&cm_aclLock);
79 *rightsp = 0; /* get a new acl from server if we don't find a
/* scan this vnode's private list of cached acl entries */
83 for (aclp = scp->randomACLp; aclp; aclp = aclp->nextp) {
84 if (aclp->userp == userp) {
/* nonzero tgtLifetime at or before 'now' means the token backing
 * these cached rights has expired -- treat the entry as stale */
85 if (aclp->tgtLifetime && aclp->tgtLifetime <= now) {
87 osi_QRemoveHT((osi_queue_t **) &cm_data.aclLRUp, (osi_queue_t **) &cm_data.aclLRUEndp, &aclp->q);
90 /* move to the tail of the LRU queue */
91 osi_QAddT((osi_queue_t **) &cm_data.aclLRUp,
92 (osi_queue_t **) &cm_data.aclLRUEndp,
/* cache hit: hand back the cached rights */
95 *rightsp = aclp->randomAccess;
96 if (cm_data.aclLRUp != aclp) {
97 /* move to the head of the LRU queue */
98 osi_QRemoveHT((osi_queue_t **) &cm_data.aclLRUp, (osi_queue_t **) &cm_data.aclLRUEndp, &aclp->q);
99 osi_QAddH((osi_queue_t **) &cm_data.aclLRUp,
100 (osi_queue_t **) &cm_data.aclLRUEndp,
103 retval = 0; /* success */
109 lock_ReleaseWrite(&cm_aclLock);
114 * This function returns a free (not in the LRU queue) acl cache entry.
115 * It must be called with the cm_aclLock lock held
/*
 * Recycles the least-recently-used entry (the LRU tail).  If that entry
 * is still attached to a vnode other than the caller's scp, the entry's
 * back vnode must be write-locked before cleanup.
 */
117 static cm_aclent_t *GetFreeACLEnt(cm_scache_t * scp)
120 cm_scache_t *ascp = 0;
122 if (cm_data.aclLRUp == NULL)
123 osi_panic("empty aclent LRU", __FILE__, __LINE__);
125 if (cm_data.aclLRUEndp == NULL)
126 osi_panic("inconsistent aclent LRUEndp == NULL", __FILE__, __LINE__);
/* take the victim from the tail (least recently used end) of the LRU */
128 aclp = cm_data.aclLRUEndp;
129 osi_QRemoveHT((osi_queue_t **) &cm_data.aclLRUp, (osi_queue_t **) &cm_data.aclLRUEndp, &aclp->q);
/* the entry still belongs to a vnode other than the caller's scp */
131 if (aclp->backp && scp != aclp->backp) {
/* NOTE(review): ascp is presumably assigned aclp->backp on a line not
 * visible here -- confirm.  The lock dance below shows the ordering:
 * cm_aclLock is dropped before taking the scache rw lock, then
 * re-obtained with both held. */
133 lock_ReleaseWrite(&cm_aclLock);
134 lock_ObtainWrite(&ascp->rw);
135 lock_ObtainWrite(&cm_aclLock);
140 lock_ReleaseWrite(&ascp->rw);
/*
 * Return the token expiration time for userp in the cell identified by
 * cellID; expirationTime stays 0 if no value is obtained.
 */
144 time_t cm_TGTLifeTime(cm_user_t *userp, afs_uint32 cellID)
146 cm_cell_t *cellp = NULL;
147 cm_ucell_t * ucp = NULL;
148 time_t expirationTime = 0;
/* look up the cell and the user's per-cell token state under the
 * user's mutex */
150 lock_ObtainMutex(&userp->mx);
151 cellp = cm_FindCellByID(cellID, CM_FLAG_NOPROBE);
152 ucp = cm_GetUCell(userp, cellp);
/* NOTE(review): a NULL check on ucp is presumably performed on an
 * intervening line not visible here -- confirm before relying on
 * this dereference */
154 expirationTime = ucp->expirationTime;
155 lock_ReleaseMutex(&userp->mx);
157 return expirationTime;
/*
 * Report whether userp holds an unexpired token for the cell identified
 * by cellID.  NOTE(review): the return type and return statements are
 * not on the visible lines -- confirm the exact return convention.
 */
161 cm_HaveToken(cm_user_t *userp, afs_uint32 cellID)
163 cm_cell_t *cellp = NULL;
164 cm_ucell_t * ucp = NULL;
168 lock_ObtainMutex(&userp->mx);
169 cellp = cm_FindCellByID(cellID, CM_FLAG_NOPROBE);
170 ucp = cm_GetUCell(userp, cellp);
/* the token counts only if its expiration lies in the future */
173 if (ucp->expirationTime > now)
176 lock_ReleaseMutex(&userp->mx);
183 * Add rights to an acl cache entry. Do the right thing if not present,
184 * including digging up an entry from the LRU queue.
186 * The scp must be locked when this function is called.
188 long cm_AddACLCache(cm_scache_t *scp, cm_user_t *userp, afs_uint32 rights)
190 struct cm_aclent *aclp;
/* cached rights are only trustworthy while the user's token is valid:
 * record the token lifetime alongside the rights */
193 tgtLifeTime = cm_TGTLifeTime(userp, scp->fid.cell);
195 lock_ObtainWrite(&cm_aclLock);
/* first look for an existing entry for this user on this vnode */
196 for (aclp = scp->randomACLp; aclp; aclp = aclp->nextp) {
197 if (aclp->userp == userp) {
198 aclp->randomAccess = rights;
/* only ever extend an entry's lifetime, never shorten it */
199 if (aclp->tgtLifetime < tgtLifeTime)
200 aclp->tgtLifetime = tgtLifeTime;
201 if (cm_data.aclLRUp != aclp) {
202 /* move to the head of the LRU queue */
203 osi_QRemoveHT((osi_queue_t **) &cm_data.aclLRUp, (osi_queue_t **) &cm_data.aclLRUEndp, &aclp->q);
204 osi_QAddH((osi_queue_t **) &cm_data.aclLRUp,
205 (osi_queue_t **) &cm_data.aclLRUEndp,
208 lock_ReleaseWrite(&cm_aclLock);
214 * Didn't find the dude we're looking for, so take someone from the LRUQ
215 * and reuse. But first try the free list and see if there's already
218 aclp = GetFreeACLEnt(scp); /* can't fail, panics instead */
/* newly filled entry becomes most-recently-used */
219 osi_QAddH((osi_queue_t **) &cm_data.aclLRUp, (osi_queue_t **) &cm_data.aclLRUEndp, &aclp->q);
/* link the recycled entry onto this vnode's private list */
221 aclp->nextp = scp->randomACLp;
222 scp->randomACLp = aclp;
225 aclp->randomAccess = rights;
226 aclp->tgtLifetime = tgtLifeTime;
227 lock_ReleaseWrite(&cm_aclLock);
232 long cm_ShutdownACLCache(void)
/*
 * Sanity-check the acl LRU queue in both directions: pointer range
 * checks against the arena boundaries, magic-number checks, and
 * loop (cycle) detection.  Failures are logged via afsi_log and stderr.
 */
237 long cm_ValidateACLCache(void)
/* loop guard: a well-formed queue can never exceed this length
 * (see the 'count > size' checks below) */
239 long size = cm_data.stats * 2;
/* head and tail pointers must be NULL or non-NULL together;
 * note: && binds tighter than ||, so the grouping is as intended */
243 if ( cm_data.aclLRUp == NULL && cm_data.aclLRUEndp != NULL ||
244 cm_data.aclLRUp != NULL && cm_data.aclLRUEndp == NULL) {
245 afsi_log("cm_ValidateACLCache failure: inconsistent LRU pointers");
246 fprintf(stderr, "cm_ValidateACLCache failure: inconsistent LRU pointers\n");
/* forward pass: walk head to tail via osi_QNext */
250 for ( aclp = cm_data.aclLRUp, count = 0; aclp;
251 aclp = (cm_aclent_t *) osi_QNext(&aclp->q), count++ ) {
/* every entry must lie inside the aclent arena, which ends where
 * the scache arena begins */
253 if ( aclp < (cm_aclent_t *)cm_data.aclBaseAddress ||
254 aclp >= (cm_aclent_t *)cm_data.scacheBaseAddress) {
255 afsi_log("cm_ValidateACLCache failure: out of range cm_aclent_t pointers");
256 fprintf(stderr, "cm_ValidateACLCache failure: out of range cm_aclent_t pointers\n");
260 if (aclp->magic != CM_ACLENT_MAGIC) {
/* NOTE(review): "acpl" is a typo in the message text (runtime string,
 * left untouched here) */
261 afsi_log("cm_ValidateACLCache failure: acpl->magic != CM_ACLENT_MAGIC");
262 fprintf(stderr, "cm_ValidateACLCache failure: acpl->magic != CM_ACLENT_MAGIC\n");
266 if ( aclp->nextp < (cm_aclent_t *)cm_data.aclBaseAddress ||
267 aclp->nextp >= (cm_aclent_t *)cm_data.scacheBaseAddress) {
268 afsi_log("cm_ValidateACLCache failure: out of range cm_aclent_t pointers");
269 fprintf(stderr, "cm_ValidateACLCache failure: out of range cm_aclent_t pointers\n");
273 if (aclp->nextp && aclp->nextp->magic != CM_ACLENT_MAGIC) {
274 afsi_log("cm_ValidateACLCache failure: acpl->nextp->magic != CM_ACLENT_MAGIC");
275 fprintf(stderr,"cm_ValidateACLCache failure: acpl->nextp->magic != CM_ACLENT_MAGIC\n");
/* a live back pointer must fall inside the scache arena */
279 if ( aclp->backp < (cm_scache_t *)cm_data.scacheBaseAddress ||
280 aclp->backp >= (cm_scache_t *)cm_data.dnlcBaseAddress) {
281 afsi_log("cm_ValidateACLCache failure: out of range cm_scache_t pointers");
282 fprintf(stderr, "cm_ValidateACLCache failure: out of range cm_scache_t pointers\n");
286 if (aclp->backp && aclp->backp->magic != CM_SCACHE_MAGIC) {
287 afsi_log("cm_ValidateACLCache failure: acpl->backp->magic != CM_SCACHE_MAGIC");
288 fprintf(stderr,"cm_ValidateACLCache failure: acpl->backp->magic != CM_SCACHE_MAGIC\n");
/* cycle detection: revisiting the head or exceeding the maximum
 * possible length means the queue is looped */
291 if (count != 0 && aclp == cm_data.aclLRUp || count > size) {
292 afsi_log("cm_ValidateACLCache failure: loop in cm_data.aclLRUp list");
293 fprintf(stderr, "cm_ValidateACLCache failure: loop in cm_data.aclLRUp list\n");
/* backward pass: walk tail to head via osi_QPrev, same checks */
298 for ( aclp = cm_data.aclLRUEndp, count = 0; aclp;
299 aclp = (cm_aclent_t *) osi_QPrev(&aclp->q), count++ ) {
301 if ( aclp < (cm_aclent_t *)cm_data.aclBaseAddress ||
302 aclp >= (cm_aclent_t *)cm_data.scacheBaseAddress) {
303 afsi_log("cm_ValidateACLCache failure: out of range cm_aclent_t pointers");
304 fprintf(stderr, "cm_ValidateACLCache failure: out of range cm_aclent_t pointers\n");
308 if (aclp->magic != CM_ACLENT_MAGIC) {
309 afsi_log("cm_ValidateACLCache failure: aclp->magic != CM_ACLENT_MAGIC");
310 fprintf(stderr, "cm_ValidateACLCache failure: aclp->magic != CM_ACLENT_MAGIC\n");
314 if ( aclp->nextp < (cm_aclent_t *)cm_data.aclBaseAddress ||
315 aclp->nextp >= (cm_aclent_t *)cm_data.scacheBaseAddress) {
316 afsi_log("cm_ValidateACLCache failure: out of range cm_aclent_t pointers");
317 fprintf(stderr, "cm_ValidateACLCache failure: out of range cm_aclent_t pointers\n");
321 if (aclp->nextp && aclp->nextp->magic != CM_ACLENT_MAGIC) {
322 afsi_log("cm_ValidateACLCache failure: aclp->nextp->magic != CM_ACLENT_MAGIC");
323 fprintf(stderr, "cm_ValidateACLCache failure: aclp->nextp->magic != CM_ACLENT_MAGIC\n");
327 if ( aclp->backp < (cm_scache_t *)cm_data.scacheBaseAddress ||
328 aclp->backp >= (cm_scache_t *)cm_data.dnlcBaseAddress) {
329 afsi_log("cm_ValidateACLCache failure: out of range cm_scache_t pointers");
330 fprintf(stderr, "cm_ValidateACLCache failure: out of range cm_scache_t pointers\n");
334 if (aclp->backp && aclp->backp->magic != CM_SCACHE_MAGIC) {
335 afsi_log("cm_ValidateACLCache failure: aclp->backp->magic != CM_SCACHE_MAGIC");
336 fprintf(stderr, "cm_ValidateACLCache failure: aclp->backp->magic != CM_SCACHE_MAGIC\n");
340 if (count != 0 && aclp == cm_data.aclLRUEndp || count > size) {
341 afsi_log("cm_ValidateACLCache failure: loop in cm_data.aclLRUEndp list");
342 fprintf(stderr, "cm_ValidateACLCache failure: loop in cm_data.aclLRUEndp list\n");
351 * Initialize the cache entries. Called during system startup.
353 long cm_InitACLCache(int newFile, long size)
357 static osi_once_t once;
/* one-time process-wide initialization of the global acl lock */
359 if (osi_Once(&once)) {
360 lock_InitializeRWLock(&cm_aclLock, "cm_aclLock", LOCK_HIERARCHY_ACL_GLOBAL);
364 lock_ObtainWrite(&cm_aclLock);
/* NOTE(review): this reset path presumably runs for a new cache file
 * (the branch condition is not on the visible lines -- confirm):
 * zero the whole arena and rebuild the LRU queue from scratch */
366 cm_data.aclLRUp = cm_data.aclLRUEndp = NULL;
367 aclp = (cm_aclent_t *) cm_data.aclBaseAddress;
368 memset(aclp, 0, size * sizeof(cm_aclent_t));
371 * Put all of these guys on the LRU queue
373 for (i = 0; i < size; i++) {
374 aclp->magic = CM_ACLENT_MAGIC;
375 osi_QAddH((osi_queue_t **) &cm_data.aclLRUp, (osi_queue_t **) &cm_data.aclLRUEndp, &aclp->q);
/* second pass over the arena: reset per-entry volatile state */
379 aclp = (cm_aclent_t *) cm_data.aclBaseAddress;
380 for (i = 0; i < size; i++) {
382 aclp->tgtLifetime = 0;
386 lock_ReleaseWrite(&cm_aclLock);
392 * Free all associated acl entries. We actually just clear the back pointer
393 * since the acl entries are already in the free list. The scp must be locked
394 * or completely unreferenced (such as when called while recycling the scp).
396 void cm_FreeAllACLEnts(cm_scache_t *scp)
401 lock_ObtainWrite(&cm_aclLock);
/* walk the vnode's list, dropping each entry's user reference and
 * detaching the entry from the vnode */
402 for (aclp = scp->randomACLp; aclp; aclp = taclp) {
405 cm_ReleaseUser(aclp->userp);
408 aclp->backp = (struct cm_scache *) 0;
/* the vnode now caches no per-user rights at all */
411 scp->randomACLp = (struct cm_aclent *) 0;
412 scp->anyAccess = 0; /* reset this, too */
413 lock_ReleaseWrite(&cm_aclLock);
418 * Invalidate all ACL entries for particular user on this particular vnode.
420 * The scp must not be locked.
422 void cm_InvalidateACLUser(cm_scache_t *scp, cm_user_t *userp)
425 cm_aclent_t **laclpp;
/* lock order used here: scp->rw is taken before cm_aclLock */
429 lock_ObtainWrite(&scp->rw);
430 lock_ObtainWrite(&cm_aclLock);
431 laclpp = &scp->randomACLp;
/* unlink every entry on this vnode that belongs to userp */
432 for (aclp = *laclpp; aclp; laclpp = &aclp->nextp, aclp = *laclpp) {
433 if (userp == aclp->userp) { /* One for a given user/scache */
434 *laclpp = aclp->nextp;
435 cm_ReleaseUser(aclp->userp);
437 aclp->backp = (struct cm_scache *) 0;
442 lock_ReleaseWrite(&cm_aclLock);
/* callback status is sampled while scp->rw is still held */
444 callback = cm_HaveCallback(scp);
445 lock_ReleaseWrite(&scp->rw);
/* notify the redirector only if an entry was actually removed, the
 * object still has a callback, and the redirector is up */
447 if (found && callback && RDR_Initialized)
448 RDR_InvalidateObject(scp->fid.cell, scp->fid.volume, scp->fid.vnode, scp->fid.unique,
449 scp->fid.hash, scp->fileType, AFS_INVALIDATE_CREDS);
453 * Invalidate ACL info for a user that has just obtained or lost tokens.
456 cm_ResetACLCache(cm_cell_t *cellp, cm_user_t *userp)
458 cm_volume_t *volp, *nextVolp;
459 cm_scache_t *scp, *nextScp;
/* pass 1: drop this user's cached rights on every scache object,
 * restricted to cellp's cell when cellp is non-NULL */
462 lock_ObtainRead(&cm_scacheLock);
463 for (hash=0; hash < cm_data.scacheHashTableSize; hash++) {
464 for (scp=cm_data.scacheHashTablep[hash]; scp; scp=nextScp) {
/* capture the successor before cm_scacheLock is dropped below */
465 nextScp = scp->nextp;
467 scp->fid.cell == cellp->cellID) {
/* hold the scp across the unlocked region so it cannot be recycled
 * while cm_InvalidateACLUser runs without cm_scacheLock */
468 cm_HoldSCacheNoLock(scp);
469 lock_ReleaseRead(&cm_scacheLock);
470 cm_InvalidateACLUser(scp, userp);
471 lock_ObtainRead(&cm_scacheLock);
472 cm_ReleaseSCacheNoLock(scp);
476 lock_ReleaseRead(&cm_scacheLock);
/* also purge any cached access-failure entries for this user/cell */
478 cm_EAccesClearUserEntries(userp, cellp ? cellp->cellID : 0);
/* pass 2: ask the redirector to invalidate each matching RW, RO and
 * backup volume so access is re-evaluated with the new credentials */
480 if (RDR_Initialized) {
481 lock_ObtainRead(&cm_volumeLock);
482 for (hash = 0; hash < cm_data.volumeHashTableSize; hash++) {
483 for ( volp = cm_data.volumeRWIDHashTablep[hash]; volp; volp = nextVolp) {
484 nextVolp = volp->vol[RWVOL].nextp;
485 if ((cellp == NULL || cellp->cellID == volp->cellp->cellID) &&
486 volp->vol[RWVOL].ID) {
/* cm_volumeLock is dropped around each redirector upcall */
487 lock_ReleaseRead(&cm_volumeLock);
488 RDR_InvalidateVolume(volp->cellp->cellID, volp->vol[RWVOL].ID, AFS_INVALIDATE_CREDS);
489 lock_ObtainRead(&cm_volumeLock);
492 for ( volp = cm_data.volumeROIDHashTablep[hash]; volp; volp = nextVolp) {
493 nextVolp = volp->vol[ROVOL].nextp;
494 if ((cellp == NULL || cellp->cellID == volp->cellp->cellID) &&
495 volp->vol[ROVOL].ID) {
496 lock_ReleaseRead(&cm_volumeLock);
497 RDR_InvalidateVolume(volp->cellp->cellID, volp->vol[ROVOL].ID, AFS_INVALIDATE_CREDS);
498 lock_ObtainRead(&cm_volumeLock);
501 for ( volp = cm_data.volumeBKIDHashTablep[hash]; volp; volp = nextVolp) {
502 nextVolp = volp->vol[BACKVOL].nextp;
503 if ((cellp == NULL || cellp->cellID == volp->cellp->cellID) &&
504 volp->vol[BACKVOL].ID) {
505 lock_ReleaseRead(&cm_volumeLock);
506 RDR_InvalidateVolume(volp->cellp->cellID, volp->vol[BACKVOL].ID, AFS_INVALIDATE_CREDS);
507 lock_ObtainRead(&cm_volumeLock);
511 lock_ReleaseRead(&cm_volumeLock);