2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afs/param.h>
/*
 * This lock protects every cm_aclent structure in the system, whether it
 * sits on the free list or in the LRU queue.  A read lock allows walking
 * the list(s); a write lock is required to modify them.  The data stored
 * in the randomUid and randomAccess fields is kept up to date under the
 * scache lock, not this one.
 * An aclent structure is free iff it has no back (vnode) pointer.
 */
osi_rwlock_t cm_aclLock; /* lock for system's aclents */
/*
 * Detach an ACL cache entry from its back vnode and drop the user
 * reference it holds, scrubbing the cached rights so the entry can be
 * recycled without leaking stale access bits.
 *
 * This must be called with cm_aclLock and the aclp->back->mx held.
 *
 * NOTE(review): local declarations (taclp, laclpp), parts of the loop
 * body and the closing braces are not visible in this view of the file.
 */
static void CleanupACLEnt(cm_aclent_t * aclp)
    if (aclp->backp->randomACLp) {
        /*
         * Remove the entry from the vnode's list
         */
        lock_AssertMutex(&aclp->backp->mx);
        laclpp = &aclp->backp->randomACLp;
        /* walk the singly linked vnode list looking for aclp's predecessor link */
        for (taclp = *laclpp; taclp; laclpp = &taclp->nextp, taclp = *laclpp) {
        /* entry claims a back pointer but is not on that vnode's list */
        osi_panic("CleanupACLEnt race", __FILE__, __LINE__);
        *laclpp = aclp->nextp; /* remove from vnode list */
    /* release the old user */
    cm_ReleaseUser(aclp->userp);
    /* clear cached rights and token lifetime; a free entry must carry none */
    aclp->randomAccess = 0;
    aclp->tgtLifetime = 0;
/*
 * Get an acl cache entry for a particular user and file, or return that it
 * doesn't exist.  Called with the scp locked.
 *
 * On a hit, stores the cached rights into *rightsp and moves the entry to
 * the head of the LRU queue; on a miss (or an expired entry) leaves
 * *rightsp at 0 so the caller fetches a fresh ACL from the server.
 *
 * NOTE(review): local declarations (aclp, retval), loop-closing braces and
 * the final return are not visible in this view of the file.
 */
long cm_FindACLCache(cm_scache_t *scp, cm_user_t *userp, long *rightsp)
    lock_ObtainWrite(&cm_aclLock);
    *rightsp = 0; /* get a new acl from server if we don't find a cached one */
    for (aclp = scp->randomACLp; aclp; aclp = aclp->nextp) {
        if (aclp->userp == userp) {
            /* entry's tokens have expired: treat as stale and demote it */
            if (aclp->tgtLifetime && aclp->tgtLifetime <= osi_Time()) {
                osi_QRemove((osi_queue_t **) &cm_data.aclLRUp, &aclp->q);
                /* move to the tail of the LRU queue */
                osi_QAddT((osi_queue_t **) &cm_data.aclLRUp,
                          (osi_queue_t **) &cm_data.aclLRUEndp,
            /* cache hit: hand the cached rights back to the caller */
            *rightsp = aclp->randomAccess;
            /* keep the LRU end pointer valid before unlinking aclp */
            if (cm_data.aclLRUEndp == aclp)
                cm_data.aclLRUEndp = (cm_aclent_t *) osi_QPrev(&aclp->q);
            /* move to the head of the LRU queue */
            osi_QRemove((osi_queue_t **) &cm_data.aclLRUp, &aclp->q);
            osi_QAddH((osi_queue_t **) &cm_data.aclLRUp,
                      (osi_queue_t **) &cm_data.aclLRUEndp,
            retval = 0; /* success */
    lock_ReleaseWrite(&cm_aclLock);
/*
 * This function returns a free (not in the LRU queue) acl cache entry.
 * It must be called with the cm_aclLock lock held.
 *
 * Recycles the entry at the tail (least-recently-used end) of the LRU
 * queue; panics rather than fail if the queue is empty.
 *
 * NOTE(review): declarations of aclp, the cleanup of the recycled
 * entry's old vnode, and the return statement are not visible in this
 * view of the file.
 */
static cm_aclent_t *GetFreeACLEnt(cm_scache_t * scp)
    cm_scache_t *ascp = 0;
    if (cm_data.aclLRUp == NULL)
        osi_panic("empty aclent LRU", __FILE__, __LINE__);
    /* take the victim from the LRU tail and unlink it from the queue */
    aclp = cm_data.aclLRUEndp;
    cm_data.aclLRUEndp = (cm_aclent_t *) osi_QPrev(&aclp->q);
    osi_QRemove((osi_queue_t **) &cm_data.aclLRUp, &aclp->q);
    /* if the entry still belongs to some *other* vnode, we must lock that
     * vnode's mutex before tearing the entry away from it */
    if (aclp->backp && scp != aclp->backp) {
        lock_ObtainMutex(&ascp->mx);
        lock_ReleaseMutex(&ascp->mx);
141 * Add rights to an acl cache entry. Do the right thing if not present,
142 * including digging up an entry from the LRU queue.
144 * The scp must be locked when this function is called.
146 long cm_AddACLCache(cm_scache_t *scp, cm_user_t *userp, long rights)
148 register struct cm_aclent *aclp;
150 lock_ObtainWrite(&cm_aclLock);
151 for (aclp = scp->randomACLp; aclp; aclp = aclp->nextp) {
152 if (aclp->userp == userp) {
153 aclp->randomAccess = rights;
154 if (aclp->tgtLifetime == 0)
155 aclp->tgtLifetime = cm_TGTLifeTime(pag);
156 lock_ReleaseWrite(&cm_aclLock);
162 * Didn't find the dude we're looking for, so take someone from the LRUQ
163 * and reuse. But first try the free list and see if there's already
166 aclp = GetFreeACLEnt(scp); /* can't fail, panics instead */
167 osi_QAddH((osi_queue_t **) &cm_data.aclLRUp, (osi_queue_t **) &cm_data.aclLRUEndp, &aclp->q);
169 aclp->nextp = scp->randomACLp;
170 scp->randomACLp = aclp;
173 aclp->randomAccess = rights;
174 aclp->tgtLifetime = cm_TGTLifeTime(userp);
175 lock_ReleaseWrite(&cm_aclLock);
/*
 * Tear down the ACL cache at client shutdown.
 * NOTE(review): the function body is not visible in this view of the file.
 */
long cm_ShutdownACLCache(void)
/*
 * Sanity-check the ACL cache: walk the LRU queue forward (aclLRUp) and
 * backward (aclLRUEndp), verifying the magic numbers on each entry, its
 * nextp neighbour and its back vnode, and detecting list cycles by
 * bounding the walk at `size` entries.
 *
 * NOTE(review): declarations of aclp/count, the non-zero error returns
 * and the closing braces are not visible in this view of the file.
 * NOTE(review): the "acpl" spelling in the first group of log messages
 * is a typo in the runtime strings (left untouched here).
 */
long cm_ValidateACLCache(void)
    /* upper bound on list length; presumably cm_data.stats is the number
     * of allocated aclents -- TODO confirm */
    long size = cm_data.stats * 2;
    for ( aclp = cm_data.aclLRUp, count = 0; aclp;
          aclp = (cm_aclent_t *) osi_QNext(&aclp->q), count++ ) {
        if (aclp->magic != CM_ACLENT_MAGIC) {
            afsi_log("cm_ValidateACLCache failure: acpl->magic != CM_ACLENT_MAGIC");
            fprintf(stderr, "cm_ValidateACLCache failure: acpl->magic != CM_ACLENT_MAGIC\n");
        if (aclp->nextp && aclp->nextp->magic != CM_ACLENT_MAGIC) {
            afsi_log("cm_ValidateACLCache failure: acpl->nextp->magic != CM_ACLENT_MAGIC");
            fprintf(stderr,"cm_ValidateACLCache failure: acpl->nextp->magic != CM_ACLENT_MAGIC\n");
        if (aclp->backp && aclp->backp->magic != CM_SCACHE_MAGIC) {
            afsi_log("cm_ValidateACLCache failure: acpl->backp->magic != CM_SCACHE_MAGIC");
            fprintf(stderr,"cm_ValidateACLCache failure: acpl->backp->magic != CM_SCACHE_MAGIC\n");
        /* && binds tighter than ||, so this reads as
         * (count != 0 && aclp == head) || count > size -- explicit parens
         * would make the intent clearer */
        if (count != 0 && aclp == cm_data.aclLRUp || count > size) {
            afsi_log("cm_ValidateACLCache failure: loop in cm_data.aclLRUp list");
            fprintf(stderr, "cm_ValidateACLCache failure: loop in cm_data.aclLRUp list\n");
    /* second pass: walk the queue from the tail using the back links */
    for ( aclp = cm_data.aclLRUEndp, count = 0; aclp;
          aclp = (cm_aclent_t *) osi_QPrev(&aclp->q), count++ ) {
        if (aclp->magic != CM_ACLENT_MAGIC) {
            afsi_log("cm_ValidateACLCache failure: aclp->magic != CM_ACLENT_MAGIC");
            fprintf(stderr, "cm_ValidateACLCache failure: aclp->magic != CM_ACLENT_MAGIC\n");
        if (aclp->nextp && aclp->nextp->magic != CM_ACLENT_MAGIC) {
            afsi_log("cm_ValidateACLCache failure: aclp->nextp->magic != CM_ACLENT_MAGIC");
            fprintf(stderr, "cm_ValidateACLCache failure: aclp->nextp->magic != CM_ACLENT_MAGIC\n");
        if (aclp->backp && aclp->backp->magic != CM_SCACHE_MAGIC) {
            afsi_log("cm_ValidateACLCache failure: aclp->backp->magic != CM_SCACHE_MAGIC");
            fprintf(stderr, "cm_ValidateACLCache failure: aclp->backp->magic != CM_SCACHE_MAGIC\n");
        /* same precedence note as the forward walk above */
        if (count != 0 && aclp == cm_data.aclLRUEndp || count > size) {
            afsi_log("cm_ValidateACLCache failure: loop in cm_data.aclLRUEndp list");
            fprintf(stderr, "cm_ValidateACLCache failure: loop in cm_data.aclLRUEndp list\n");
/*
 * Initialize the cache to hold `size` entries.  Called during system
 * startup.
 *
 * On the first call the lock is created; the entry array at
 * cm_data.aclBaseAddress is then zeroed, stamped with magic numbers, and
 * every entry is threaded onto the LRU queue.
 *
 * NOTE(review): declarations of aclp/i, the use of the newFile flag, the
 * advancing of aclp in the loops, the closing braces and the return are
 * not visible in this view of the file.
 */
long cm_InitACLCache(int newFile, long size)
    static osi_once_t once;
    if (osi_Once(&once)) {
        lock_InitializeRWLock(&cm_aclLock, "cm_aclLock");
    lock_ObtainWrite(&cm_aclLock);
    /* start with an empty LRU queue, then rebuild it from the base array */
    cm_data.aclLRUp = cm_data.aclLRUEndp = NULL;
    aclp = (cm_aclent_t *) cm_data.aclBaseAddress;
    memset(aclp, 0, size * sizeof(cm_aclent_t));
    /*
     * Put all of these guys on the LRU queue
     */
    for (i = 0; i < size; i++) {
        aclp->magic = CM_ACLENT_MAGIC;
        osi_QAddH((osi_queue_t **) &cm_data.aclLRUp, (osi_queue_t **) &cm_data.aclLRUEndp, &aclp->q);
    /* second pass: reset per-entry state */
    aclp = (cm_aclent_t *) cm_data.aclBaseAddress;
    for (i = 0; i < size; i++) {
        aclp->tgtLifetime = 0;
    lock_ReleaseWrite(&cm_aclLock);
/*
 * Free all associated acl entries.  We actually just clear the back pointer
 * since the acl entries are already in the free list.  The scp must be locked
 * or completely unreferenced (such as when called while recycling the scp).
 *
 * NOTE(review): the declaration of taclp, the saving of aclp->nextp into
 * taclp, the guard on aclp->userp and the closing braces are not visible
 * in this view of the file.
 */
void cm_FreeAllACLEnts(cm_scache_t *scp)
    lock_ObtainWrite(&cm_aclLock);
    for (aclp = scp->randomACLp; aclp; aclp = taclp) {
        /* drop the user reference held by each entry */
        cm_ReleaseUser(aclp->userp);
        /* clearing the back pointer marks the entry free */
        aclp->backp = (struct cm_scache *) 0;
    scp->randomACLp = (struct cm_aclent *) 0;
    scp->anyAccess = 0; /* reset this, too */
    lock_ReleaseWrite(&cm_aclLock);
/*
 * Invalidate all ACL entries for a particular user on this particular vnode.
 *
 * Unlinks the matching entry from the vnode's list, drops its user
 * reference, and clears its back pointer (marking it free).
 *
 * The scp must be locked.
 *
 * NOTE(review): the declaration of aclp and parts of the match branch
 * (e.g. clearing aclp->userp) are not visible in this view of the file.
 */
void cm_InvalidateACLUser(cm_scache_t *scp, cm_user_t *userp)
    cm_aclent_t **laclpp;
    lock_ObtainWrite(&cm_aclLock);
    laclpp = &scp->randomACLp;
    /* walk the vnode's ACL list tracking the predecessor link */
    for (aclp = *laclpp; aclp; laclpp = &aclp->nextp, aclp = *laclpp) {
        if (userp == aclp->userp) { /* One for a given user/scache */
            *laclpp = aclp->nextp;
            cm_ReleaseUser(aclp->userp);
            /* no back pointer == free entry */
            aclp->backp = (struct cm_scache *) 0;
    lock_ReleaseWrite(&cm_aclLock);