/*
 * Copyright (C) 1998, 1989 Transarc Corporation - All rights reserved
 *
 * (C) COPYRIGHT IBM CORPORATION 1987, 1988
 * LICENSED MATERIALS - PROPERTY OF IBM
 *
 * Copyright (C) 1994, 1990 Transarc Corporation
 * All rights reserved.
 */
#include <afs/param.h>
25 * This next lock controls access to all cm_aclent structures in the system,
26 * in either the free list or in the LRU queue. A read lock prevents someone
27 * from modifying the list(s), and a write lock is required for modifying
28 * the list. The actual data stored in the randomUid and randomAccess fields
29 * is actually maintained as up-to-date or not via the scache llock.
30 * An aclent structure is free if it has no back vnode pointer.
32 osi_rwlock_t cm_aclLock; /* lock for system's aclents */
33 cm_aclent_t *cm_aclLRUp; /* LRUQ for dudes in vnodes' lists */
34 cm_aclent_t *cm_aclLRUEndp; /* ditto */
37 * Get an acl cache entry for a particular user and file, or return that it doesn't exist.
38 * Called with the scp locked.
40 long cm_FindACLCache(cm_scache_t *scp, cm_user_t *userp, long *rightsp)
44 lock_ObtainWrite(&cm_aclLock);
45 for (aclp = scp->randomACLp; aclp; aclp = aclp->nextp) {
46 if (aclp->userp == userp) {
47 if (aclp->tgtLifetime && aclp->tgtLifetime <= (long) osi_Time()) {
49 aclp->tgtLifetime = 0;
51 break; /* get a new acl from server */
54 *rightsp = aclp->randomAccess;
55 if (cm_aclLRUEndp == aclp)
56 cm_aclLRUEndp = (cm_aclent_t *) osi_QPrev(&aclp->q);
58 /* move to the head of the LRU queue */
59 osi_QRemove((osi_queue_t **) &cm_aclLRUp, &aclp->q);
60 osi_QAddH((osi_queue_t **) &cm_aclLRUp,
61 (osi_queue_t **) &cm_aclLRUEndp,
64 lock_ReleaseWrite(&cm_aclLock);
70 * If we make it here, this entry isn't present, so we're going to fail.
72 lock_ReleaseWrite(&cm_aclLock);
78 * This function returns a free (not in the LRU queue) acl cache entry.
79 * It must be called with the cm_aclLock lock held.
81 static cm_aclent_t *GetFreeACLEnt(void)
87 if (cm_aclLRUp == NULL)
88 osi_panic("empty aclent LRU", __FILE__, __LINE__);
91 if (aclp == cm_aclLRUEndp)
92 cm_aclLRUEndp = (cm_aclent_t *) osi_QPrev(&aclp->q);
93 osi_QRemove((osi_queue_t **) &cm_aclLRUp, &aclp->q);
96 * Remove the entry from the vnode's list
98 laclpp = &aclp->backp->randomACLp;
99 for (taclp = *laclpp; taclp; laclpp = &taclp->nextp, taclp = *laclpp) {
104 osi_panic("GetFreeACLEnt race", __FILE__, __LINE__);
105 *laclpp = aclp->nextp; /* remove from vnode list */
109 /* release the old user */
111 cm_ReleaseUser(aclp->userp);
119 * Add rights to an acl cache entry. Do the right thing if not present,
120 * including digging up an entry from the LRU queue.
122 * The scp must be locked when this function is called.
124 long cm_AddACLCache(cm_scache_t *scp, cm_user_t *userp, long rights)
126 register struct cm_aclent *aclp;
128 lock_ObtainWrite(&cm_aclLock);
129 for (aclp = scp->randomACLp; aclp; aclp = aclp->nextp) {
130 if (aclp->userp == userp) {
131 aclp->randomAccess = rights;
132 if (aclp->tgtLifetime == 0)
133 aclp->tgtLifetime = cm_TGTLifeTime(pag);
134 lock_ReleaseWrite(&cm_aclLock);
140 * Didn't find the dude we're looking for, so take someone from the LRUQ
141 * and reuse. But first try the free list and see if there's already
144 aclp = GetFreeACLEnt(); /* can't fail, panics instead */
145 osi_QAddH((osi_queue_t **) &cm_aclLRUp, (osi_queue_t **) &cm_aclLRUEndp, &aclp->q);
147 aclp->nextp = scp->randomACLp;
148 scp->randomACLp = aclp;
151 aclp->randomAccess = rights;
152 aclp->tgtLifetime = cm_TGTLifeTime(userp);
153 lock_ReleaseWrite(&cm_aclLock);
159 * Initialize the cache to have an entries. Called during system startup.
161 long cm_InitACLCache(long size)
165 static osi_once_t once;
168 if (osi_Once(&once)) {
169 lock_InitializeRWLock(&cm_aclLock, "cm_aclLock");
173 lock_ObtainWrite(&cm_aclLock);
174 cm_aclLRUp = cm_aclLRUEndp = NULL;
175 aclp = (cm_aclent_t *) malloc(size * sizeof(cm_aclent_t));
176 memset(aclp, 0, size * sizeof(cm_aclent_t));
179 * Put all of these guys on the LRU queue
181 for (i = 0; i < size; i++) {
182 osi_QAddH((osi_queue_t **) &cm_aclLRUp, (osi_queue_t **) &cm_aclLRUEndp,
187 lock_ReleaseWrite(&cm_aclLock);
193 * Free all associated acl entries. We actually just clear the back pointer
194 * since the acl entries are already in the free list. The scp must be locked
195 * or completely unreferenced (such as when called while recycling the scp).
197 void cm_FreeAllACLEnts(cm_scache_t *scp)
202 lock_ObtainWrite(&cm_aclLock);
203 for (aclp = scp->randomACLp; aclp; aclp = taclp) {
206 cm_ReleaseUser(aclp->userp);
209 aclp->backp = (struct cm_scache *) 0;
212 scp->randomACLp = (struct cm_aclent *) 0;
213 scp->anyAccess = 0; /* reset this, too */
214 lock_ReleaseWrite(&cm_aclLock);
219 * Invalidate all ACL entries for particular user on this particular vnode.
221 * The scp must be locked.
223 void cm_InvalidateACLUser(cm_scache_t *scp, cm_user_t *userp)
226 cm_aclent_t **laclpp;
228 lock_ObtainWrite(&cm_aclLock);
229 laclpp = &scp->randomACLp;
230 for (aclp = *laclpp; aclp; laclpp = &aclp->nextp, aclp = *laclpp) {
231 if (userp == aclp->userp) { /* One for a given user/scache */
232 *laclpp = aclp->nextp;
233 cm_ReleaseUser(aclp->userp);
235 aclp->backp = (struct cm_scache *) 0;
239 lock_ReleaseWrite(&cm_aclLock);