/*
 * Copyright 2000, International Business Machines Corporation and others.
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

#include <afs/param.h>
/*
 * This next lock controls access to all cm_aclent structures in the system,
 * whether on the free list or in the LRU queue.  A read lock prevents someone
 * from modifying the list(s), and a write lock is required for modifying
 * the list.  The data stored in the randomUid and randomAccess fields is
 * kept up-to-date (or not) under the scache lock.  An aclent structure is
 * free if it has no back vnode pointer.
 */
osi_rwlock_t cm_aclLock;		/* lock for system's aclents */
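/*
 * Illustrative sketch (not part of the original source): under the locking
 * discipline above, a caller that only inspects the chains takes cm_aclLock
 * for reading, while anything that relinks entries takes it for writing.
 * A hypothetical read-only walk of an scp's ACL chain might look like this
 * (cm_CountACLEnts is invented for illustration):
 *
 *     static int cm_CountACLEnts(cm_scache_t *scp)
 *     {
 *         cm_aclent_t *aclp;
 *         int n = 0;
 *
 *         lock_ObtainRead(&cm_aclLock);      // reading only; no relinking
 *         for (aclp = scp->randomACLp; aclp; aclp = aclp->nextp)
 *             n++;
 *         lock_ReleaseRead(&cm_aclLock);
 *         return n;
 *     }
 */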
/*
 * Get an acl cache entry for a particular user and file, or return that it
 * doesn't exist.  Called with the scp locked.
 */
long cm_FindACLCache(cm_scache_t *scp, cm_user_t *userp, long *rightsp)
{
    lock_ObtainWrite(&cm_aclLock);
    for (aclp = scp->randomACLp; aclp; aclp = aclp->nextp) {
        if (aclp->userp == userp) {
            if (aclp->tgtLifetime && aclp->tgtLifetime <= (long) osi_Time()) {
                /* the token lifetime has expired; force a refetch from the server */
                aclp->tgtLifetime = 0;
                *rightsp = 0;	/* get a new acl from server */
                /* Shouldn't we remove this entry from the scp?
                 * 2005-01-25 - jaltman@secure-endpoints.com
                 */
            } else {
                *rightsp = aclp->randomAccess;
                if (cm_data.aclLRUEndp == aclp)
                    cm_data.aclLRUEndp = (cm_aclent_t *) osi_QPrev(&aclp->q);

                /* move to the head of the LRU queue */
                osi_QRemove((osi_queue_t **) &cm_data.aclLRUp, &aclp->q);
                osi_QAddH((osi_queue_t **) &cm_data.aclLRUp,
                          (osi_queue_t **) &cm_data.aclLRUEndp,
                          &aclp->q);
                retval = 0;	/* success */

    lock_ReleaseWrite(&cm_aclLock);
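/*
 * Usage sketch (illustrative, not part of the original source): a caller
 * that already holds the scp lock, as required above, can consult the cache
 * before going to the file server.  The refresh step is described only in
 * the comment because the RPC path lives elsewhere.
 *
 *     long rights;
 *
 *     if (cm_FindACLCache(scp, userp, &rights) == 0) {
 *         // hit: 'rights' holds the cached access bits for this user
 *     } else {
 *         // miss or expired token: fetch the ACL/status from the server,
 *         // then record the result with cm_AddACLCache(scp, userp, rights).
 *     }
 */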
/*
 * This function returns a free (not in the LRU queue) acl cache entry.
 * It must be called with the cm_aclLock lock held.
 */
static cm_aclent_t *GetFreeACLEnt(void)
    if (cm_data.aclLRUp == NULL)
        osi_panic("empty aclent LRU", __FILE__, __LINE__);

    /* cm_aclLock is already held for writing by the caller */
    aclp = cm_data.aclLRUEndp;
    cm_data.aclLRUEndp = (cm_aclent_t *) osi_QPrev(&aclp->q);
    osi_QRemove((osi_queue_t **) &cm_data.aclLRUp, &aclp->q);
    /* Remove the entry from the vnode's list */
    laclpp = &aclp->backp->randomACLp;
    for (taclp = *laclpp; taclp; laclpp = &taclp->nextp, taclp = *laclpp) {
        if (taclp == aclp)
            break;
    }
    if (!taclp)
        osi_panic("GetFreeACLEnt race", __FILE__, __LINE__);
    *laclpp = aclp->nextp;	/* remove from vnode list */
    /* release the old user */
    if (aclp->userp)
        cm_ReleaseUser(aclp->userp);

    /* note: cm_aclLock stays held; the caller releases it */
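/*
 * Usage sketch (illustrative, not part of the original source): the caller
 * already holds cm_aclLock for writing, takes the recycled entry, and is
 * responsible for putting it back on the LRU queue and onto the target
 * vnode's chain before dropping the lock, exactly as cm_AddACLCache does
 * below:
 *
 *     aclp = GetFreeACLEnt();                    // panics rather than fail
 *     osi_QAddH((osi_queue_t **) &cm_data.aclLRUp,
 *               (osi_queue_t **) &cm_data.aclLRUEndp, &aclp->q);
 *     aclp->nextp = scp->randomACLp;             // link onto the vnode's chain
 *     scp->randomACLp = aclp;
 */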
/*
 * Add rights to an acl cache entry.  Do the right thing if not present,
 * including digging up an entry from the LRU queue.
 *
 * The scp must be locked when this function is called.
 */
long cm_AddACLCache(cm_scache_t *scp, cm_user_t *userp, long rights)
{
    register struct cm_aclent *aclp;

    lock_ObtainWrite(&cm_aclLock);
    for (aclp = scp->randomACLp; aclp; aclp = aclp->nextp) {
        if (aclp->userp == userp) {
            aclp->randomAccess = rights;
            if (aclp->tgtLifetime == 0)
                aclp->tgtLifetime = cm_TGTLifeTime(userp);
            lock_ReleaseWrite(&cm_aclLock);
    /*
     * Didn't find the entry we're looking for, so take someone from the LRU
     * queue and reuse it.  But first try the free list and see if there's
     * already a free entry there.
     */
    aclp = GetFreeACLEnt();	/* can't fail, panics instead */
    osi_QAddH((osi_queue_t **) &cm_data.aclLRUp, (osi_queue_t **) &cm_data.aclLRUEndp, &aclp->q);
    aclp->nextp = scp->randomACLp;
    scp->randomACLp = aclp;
    aclp->randomAccess = rights;
    aclp->tgtLifetime = cm_TGTLifeTime(userp);
    lock_ReleaseWrite(&cm_aclLock);
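/*
 * Usage sketch (illustrative, not part of the original source): once an RPC
 * reply has been decoded into the caller's rights for this vnode, the rights
 * are cached so later checks can be answered by cm_FindACLCache.  The rights
 * value below is hypothetical.
 *
 *     long rights = PRSFS_READ | PRSFS_LOOKUP;   // bits decoded from the server reply
 *
 *     cm_AddACLCache(scp, userp, rights);        // scp is locked by the caller
 */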
long cm_ShutdownACLCache(void)

long cm_ValidateACLCache(void)
{
    long size = cm_data.stats * 2;
    for ( aclp = cm_data.aclLRUp, count = 0; aclp;
          aclp = (cm_aclent_t *) osi_QNext(&aclp->q), count++ ) {
        if (aclp->magic != CM_ACLENT_MAGIC) {
            afsi_log("cm_ValidateACLCache failure: aclp->magic != CM_ACLENT_MAGIC");
            fprintf(stderr, "cm_ValidateACLCache failure: aclp->magic != CM_ACLENT_MAGIC\n");
        if (aclp->nextp && aclp->nextp->magic != CM_ACLENT_MAGIC) {
            afsi_log("cm_ValidateACLCache failure: aclp->nextp->magic != CM_ACLENT_MAGIC");
            fprintf(stderr, "cm_ValidateACLCache failure: aclp->nextp->magic != CM_ACLENT_MAGIC\n");
        if (aclp->backp && aclp->backp->magic != CM_SCACHE_MAGIC) {
            afsi_log("cm_ValidateACLCache failure: aclp->backp->magic != CM_SCACHE_MAGIC");
            fprintf(stderr, "cm_ValidateACLCache failure: aclp->backp->magic != CM_SCACHE_MAGIC\n");
        if ((count != 0 && aclp == cm_data.aclLRUp) || count > size) {
            afsi_log("cm_ValidateACLCache failure: loop in cm_data.aclLRUp list");
            fprintf(stderr, "cm_ValidateACLCache failure: loop in cm_data.aclLRUp list\n");
    for ( aclp = cm_data.aclLRUEndp, count = 0; aclp;
          aclp = (cm_aclent_t *) osi_QPrev(&aclp->q), count++ ) {
        if (aclp->magic != CM_ACLENT_MAGIC) {
            afsi_log("cm_ValidateACLCache failure: aclp->magic != CM_ACLENT_MAGIC");
            fprintf(stderr, "cm_ValidateACLCache failure: aclp->magic != CM_ACLENT_MAGIC\n");
        if (aclp->nextp && aclp->nextp->magic != CM_ACLENT_MAGIC) {
            afsi_log("cm_ValidateACLCache failure: aclp->nextp->magic != CM_ACLENT_MAGIC");
            fprintf(stderr, "cm_ValidateACLCache failure: aclp->nextp->magic != CM_ACLENT_MAGIC\n");
        if (aclp->backp && aclp->backp->magic != CM_SCACHE_MAGIC) {
            afsi_log("cm_ValidateACLCache failure: aclp->backp->magic != CM_SCACHE_MAGIC");
            fprintf(stderr, "cm_ValidateACLCache failure: aclp->backp->magic != CM_SCACHE_MAGIC\n");
        if ((count != 0 && aclp == cm_data.aclLRUEndp) || count > size) {
            afsi_log("cm_ValidateACLCache failure: loop in cm_data.aclLRUEndp list");
            fprintf(stderr, "cm_ValidateACLCache failure: loop in cm_data.aclLRUEndp list\n");
/*
 * Initialize the cache to hold 'size' entries.  Called during system startup.
 */
long cm_InitACLCache(int newFile, long size)
{
    static osi_once_t once;

    if (osi_Once(&once)) {
        lock_InitializeRWLock(&cm_aclLock, "cm_aclLock");

    lock_ObtainWrite(&cm_aclLock);
    if (newFile) {
        cm_data.aclLRUp = cm_data.aclLRUEndp = NULL;
        aclp = (cm_aclent_t *) cm_data.aclBaseAddress;
        memset(aclp, 0, size * sizeof(cm_aclent_t));

        /* Put all of these guys on the LRU queue */
        for (i = 0; i < size; i++) {
            aclp->magic = CM_ACLENT_MAGIC;
            osi_QAddH((osi_queue_t **) &cm_data.aclLRUp, (osi_queue_t **) &cm_data.aclLRUEndp, &aclp->q);
            aclp++;
        }
    } else {
        /* reusing an existing cache file: just reset the cached token lifetimes */
        aclp = (cm_aclent_t *) cm_data.aclBaseAddress;
        for (i = 0; i < size; i++) {
            aclp->tgtLifetime = 0;
            aclp++;
        }
    }
    lock_ReleaseWrite(&cm_aclLock);
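/*
 * Usage sketch (illustrative, not part of the original source): the
 * validation routine above treats cm_data.stats * 2 as the expected cache
 * size, so a startup path would size the cache the same way; newFile says
 * whether the mapped cache data is fresh or being reused across restarts.
 *
 *     cm_InitACLCache(newFile, 2 * cm_data.stats);
 */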
/*
 * Free all associated acl entries.  We actually just clear the back pointer
 * since the acl entries are already in the free list.  The scp must be locked
 * or completely unreferenced (such as when called while recycling the scp).
 */
void cm_FreeAllACLEnts(cm_scache_t *scp)
{
    lock_ObtainWrite(&cm_aclLock);
    for (aclp = scp->randomACLp; aclp; aclp = taclp) {
        taclp = aclp->nextp;	/* save the next entry before this one is cleared */
        if (aclp->userp)
            cm_ReleaseUser(aclp->userp);
        aclp->userp = NULL;
        aclp->backp = (struct cm_scache *) 0;
    }
    scp->randomACLp = (struct cm_aclent *) 0;
    scp->anyAccess = 0;		/* reset this, too */
    lock_ReleaseWrite(&cm_aclLock);
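/*
 * Usage sketch (illustrative, not part of the original source): a recycling
 * path that holds the scp lock, or owns the only reference to the scp, drops
 * every cached ACL entry before reusing the scache:
 *
 *     cm_FreeAllACLEnts(scp);    // only back pointers and user references
 *                                // are cleared; the entries themselves stay
 *                                // where they are for later reuse
 */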
/*
 * Invalidate all ACL entries for a particular user on this particular vnode.
 *
 * The scp must be locked.
 */
void cm_InvalidateACLUser(cm_scache_t *scp, cm_user_t *userp)
{
    cm_aclent_t **laclpp;

    lock_ObtainWrite(&cm_aclLock);
    laclpp = &scp->randomACLp;
    for (aclp = *laclpp; aclp; laclpp = &aclp->nextp, aclp = *laclpp) {
        if (userp == aclp->userp) {	/* One for a given user/scache */
            *laclpp = aclp->nextp;
            cm_ReleaseUser(aclp->userp);
            aclp->userp = NULL;
            aclp->backp = (struct cm_scache *) 0;
            break;
        }
    }
    lock_ReleaseWrite(&cm_aclLock);
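/*
 * Usage sketch (illustrative, not part of the original source): when a
 * user's credentials are discarded or replaced, the cached rights for that
 * user on a vnode can no longer be trusted, so a caller holding the scp
 * lock drops them and lets the next access repopulate the cache:
 *
 *     cm_InvalidateACLUser(scp, userp);
 *     // a later cm_FindACLCache(scp, userp, &rights) will now miss and the
 *     // rights will be refetched from the file server
 */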