/*
 * Copyright 2000, International Business Machines Corporation and others.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/* Copyright (C) 1994 Cazamar Systems, Inc. */

#include <afs/param.h>

#define TRACE_BUFFER 1

extern void afsi_log(char *pattern, ...);

/* This module implements the buffer package used by the local transaction
 * system (cm).  It is initialized by calling cm_Init, which calls buf_Init;
 * it must be initialized before any of its main routines are called.
 *
 * Each buffer is hashed into a hash table by file ID and offset, and if its
 * reference count is zero, it is also in a free list.
 *
 * There are two locks involved in buffer processing.  The global lock
 * buf_globalLock protects all of the global variables defined in this module,
 * the reference counts and hash pointers in the actual cm_buf_t structures,
 * and the LRU queue pointers in the buffer structures.
 *
 * The mutexes in the buffer structures protect the remaining fields in the
 * buffers, as well as the data itself.
 *
 * The locking hierarchy here is this:
 *
 * - resv multiple simul. buffers reservation
 * - lock buffer I/O flags
 * - lock buffer's mutex
 * - lock buf_globalLock
 */
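
/* Illustrative sketch (not part of the module): the hierarchy above is
 * ordered from "acquire first" to "acquire last", so a thread already
 * holding a buffer's mutex may take buf_globalLock, but never the reverse.
 * The helper below is hypothetical.
 */
#if 0
static void example_ClearErrorAndRelease(cm_buf_t *bp)  /* bp held by caller */
{
    lock_ObtainMutex(&bp->mx);           /* buffer mutex: protects flags and data */
    bp->flags &= ~CM_BUF_ERROR;
    lock_ObtainWrite(&buf_globalLock);   /* global lock nests inside the mutex */
    buf_LockedRelease(bp);               /* ref counts live under buf_globalLock */
    lock_ReleaseWrite(&buf_globalLock);
    lock_ReleaseMutex(&bp->mx);
}
#endif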

/* global debugging log */
osi_log_t *buf_logp = NULL;

/* Global lock protecting hash tables and free lists */
osi_rwlock_t buf_globalLock;

/* ptr to head of the free list (most recently used) and the
 * tail (the guy to remove first).  We use osi_Q* functions
 * to put stuff in buf_freeListp, and maintain the end
 * pointer in buf_freeListEndp.
 */
cm_buf_t *buf_freeListp;
cm_buf_t *buf_freeListEndp;

/* a pointer to a list of all buffers, just so that we can find them
 * easily for debugging, and for the incr syncer.  Locked under
 * the buf_globalLock.
 */
cm_buf_t *buf_allp;

/* defaults setup; these variables may be assigned before calling
 * cm_Init, as a way of changing these defaults.
 */
long buf_nbuffers = CM_BUF_BUFFERS;
long buf_nOrigBuffers;
long buf_bufferSize = CM_BUF_SIZE;
long buf_hashSize = CM_BUF_HASHSIZE;
int buf_cacheType = CM_BUF_CACHETYPE_FILE;

/* buffer reservation variables */
long buf_reservedBufs;
long buf_maxReservedBufs;
int buf_reserveWaiting;

/* callouts for reading and writing data, etc. */
cm_buf_ops_t *cm_buf_opsp;
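
/* Illustrative sketch (not part of the module): how a client of this package
 * might fill in the ops vector and hand it to buf_Init.  The cm_Example*
 * functions and their signatures are hypothetical; only the field names
 * (Readp, Writep, Stabilizep, Unstabilizep) are taken from the calls made
 * elsewhere in this file.
 */
#if 0
extern long cm_ExampleRead(cm_buf_t *bufp, long nbytes,
                           unsigned long *bytesReadp, void *rockp);
extern long cm_ExampleWrite(cm_fid_t *fidp, osi_hyper_t *offsetp, long length,
                            long flags, cm_user_t *userp, cm_req_t *reqp);
extern long cm_ExampleStabilize(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp);
extern long cm_ExampleUnstabilize(cm_scache_t *scp, cm_user_t *userp);

static long example_StartBufferPackage(void)
{
    static cm_buf_ops_t example_bufOps;

    example_bufOps.Readp = cm_ExampleRead;
    example_bufOps.Writep = cm_ExampleWrite;
    example_bufOps.Stabilizep = cm_ExampleStabilize;
    example_bufOps.Unstabilizep = cm_ExampleUnstabilize;
    return buf_Init(&example_bufOps);   /* must run before any other buf_ call */
}
#endif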

/* pointer to hash table; size computed dynamically */
cm_buf_t **buf_hashTablepp;

/* another hash table, this one keyed by file ID */
cm_buf_t **buf_fileHashTablepp;

/* for experimental disk caching support in Win95 client */
#ifdef DISKCACHE95
cm_buf_t *buf_diskFreeListp;
cm_buf_t *buf_diskFreeListEndp;
cm_buf_t *buf_diskAllp;
extern int cm_diskCacheEnabled;
#endif /* DISKCACHE95 */

/* hold a reference to an already held buffer */
void buf_Hold(cm_buf_t *bp)
{
    lock_ObtainWrite(&buf_globalLock);
    bp->refCount++;
    lock_ReleaseWrite(&buf_globalLock);
}

/* incremental sync daemon.  Writes 1/10th of all the buffers every 5000 ms */
void buf_IncrSyncer(long parm)
{
    cm_buf_t *bp;     /* buffer we're hacking on; held */
    long i;           /* counter */
    long nAtOnce;     /* how many to do at once */
    cm_req_t req;

    /* grab the first buffer on the allp chain, and hold it */
    lock_ObtainWrite(&buf_globalLock);
    bp = buf_allp;
    bp->refCount++;
    lock_ReleaseWrite(&buf_globalLock);

    nAtOnce = buf_nbuffers / 10;
    while (1) {
        i = SleepEx(5000, 1);
        if (i != 0) continue;

        /* now go through our percentage of the buffers */
        for(i=0; i<nAtOnce; i++) {
            /* don't want its identity changing while we're
             * messing with it, so must do all of this with
             * bp held.
             */

            /* start cleaning the buffer; don't touch log pages since
             * the log code counts on knowing exactly who is writing
             * a log page at any given instant.
             */
            cm_InitReq(&req);
            req.flags |= CM_REQ_NORETRY;
            buf_CleanAsync(bp, &req);

            /* now advance to the next buffer; the allp chain never changes,
             * and so can be followed even when holding no locks.
             */
            lock_ObtainWrite(&buf_globalLock);
            buf_LockedRelease(bp);
            bp = bp->allp;
            if (!bp) bp = buf_allp;
            bp->refCount++;
            lock_ReleaseWrite(&buf_globalLock);
        } /* for loop over a bunch of buffers */
    } /* whole daemon's while loop */
}

/* Create a security attribute structure suitable for use when the cache file
 * is created.  What we mainly want is that only the administrator should be
 * able to do anything with the file.  We create an ACL with only one entry,
 * an entry that grants all rights to the administrator.
 */
PSECURITY_ATTRIBUTES CreateCacheFileSA()
{
    PSECURITY_ATTRIBUTES psa;
    PSECURITY_DESCRIPTOR psd;
    SID_IDENTIFIER_AUTHORITY authority = SECURITY_NT_AUTHORITY;
    PSID AdminSID;
    DWORD AdminSIDlength;
    PACL AdminOnlyACL;
    DWORD ACLlength;

    /* Get Administrator SID */
    AllocateAndInitializeSid(&authority, 2,
                             SECURITY_BUILTIN_DOMAIN_RID,
                             DOMAIN_ALIAS_RID_ADMINS,
                             0, 0, 0, 0, 0, 0,
                             &AdminSID);

    /* Create Administrator-only ACL */
    AdminSIDlength = GetLengthSid(AdminSID);
    ACLlength = sizeof(ACL) + sizeof(ACCESS_ALLOWED_ACE)
                + AdminSIDlength - sizeof(DWORD);
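    /* Note on the sizing arithmetic above: an ACCESS_ALLOWED_ACE already
     * contains the first DWORD of its SID (the SidStart field), so the SID
     * length is added and one DWORD subtracted to avoid counting that word
     * twice.  This is the standard Win32 idiom for sizing an ACL allocation.
     */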
    AdminOnlyACL = GlobalAlloc(GMEM_FIXED, ACLlength);
    InitializeAcl(AdminOnlyACL, ACLlength, ACL_REVISION);
    AddAccessAllowedAce(AdminOnlyACL, ACL_REVISION,
                        STANDARD_RIGHTS_ALL | SPECIFIC_RIGHTS_ALL,
                        AdminSID);

    /* Create security descriptor */
    psd = GlobalAlloc(GMEM_FIXED, sizeof(SECURITY_DESCRIPTOR));
    InitializeSecurityDescriptor(psd, SECURITY_DESCRIPTOR_REVISION);
    SetSecurityDescriptorDacl(psd, TRUE, AdminOnlyACL, FALSE);

    /* Create security attributes structure */
    psa = GlobalAlloc(GMEM_FIXED, sizeof(SECURITY_ATTRIBUTES));
    psa->nLength = sizeof(SECURITY_ATTRIBUTES);
    psa->lpSecurityDescriptor = psd;
    psa->bInheritHandle = TRUE;

    return psa;
}

/* Free a security attribute structure created by CreateCacheFileSA() */
VOID FreeCacheFileSA(PSECURITY_ATTRIBUTES psa)
{
    BOOL b1, b2;
    PACL pAcl;

    GetSecurityDescriptorDacl(psa->lpSecurityDescriptor, &b1, &pAcl, &b2);
    GlobalFree(pAcl);
    GlobalFree(psa->lpSecurityDescriptor);
    GlobalFree(psa);
}

/* initialize the buffer package; called with no locks
 * held during the initialization phase.
 */
long buf_Init(cm_buf_ops_t *opsp)
{
    static osi_once_t once;
    cm_buf_t *bp;
    thread_t phandle;
    HANDLE hf, hm;
    PSECURITY_ATTRIBUTES psa;
    long i;
    unsigned long pid;
    char *data;
    long cs;
    SYSTEM_INFO sysInfo;

    /* Get system info; all we really want is the allocation granularity */
    GetSystemInfo(&sysInfo);

    /* Have to be able to reserve a whole chunk */
    if (((buf_nbuffers - 3) * buf_bufferSize) < cm_chunkSize)
        return CM_ERROR_TOOFEWBUFS;
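    /* For example (hypothetical values): with buf_bufferSize = 4096 and
     * cm_chunkSize = 32768, mapping one chunk takes 8 buffers, so
     * buf_nbuffers must be at least 11; three buffers are excluded here
     * because they are kept in reserve (see buf_maxReservedBufs below).
     */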

    /* remember the callouts */
    cm_buf_opsp = opsp;

    if (osi_Once(&once)) {
        /* initialize global locks */
        lock_InitializeRWLock(&buf_globalLock, "Global buffer lock");

        /*
         * Cache file mapping constrained by
         * system allocation granularity;
         * round up, assuming granularity is a power of two
         */
        cs = buf_nbuffers * buf_bufferSize;
        cs = (cs + (sysInfo.dwAllocationGranularity - 1))
             & ~(sysInfo.dwAllocationGranularity - 1);
        if (cs != buf_nbuffers * buf_bufferSize) {
            buf_nbuffers = cs / buf_bufferSize;
            afsi_log("Cache size rounded up to %d buffers",
                     buf_nbuffers);
        }

        /* remember this for those who want to reset it */
        buf_nOrigBuffers = buf_nbuffers;
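        /* Worked example of the round-up above (hypothetical numbers): with
         * a 64 KiB allocation granularity and cs = 100000, (100000 + 65535)
         * & ~65535 == 131072, i.e. the next multiple of the granularity;
         * buf_nbuffers then becomes 131072 / buf_bufferSize.
         */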

        /* lower hash size to a prime number */
        buf_hashSize = osi_PrimeLessThan(buf_hashSize);

        /* create hash table */
        buf_hashTablepp = malloc(buf_hashSize * sizeof(cm_buf_t *));
        memset((void *)buf_hashTablepp, 0,
               buf_hashSize * sizeof(cm_buf_t *));

        /* another hash table */
        buf_fileHashTablepp = malloc(buf_hashSize * sizeof(cm_buf_t *));
        memset((void *)buf_fileHashTablepp, 0,
               buf_hashSize * sizeof(cm_buf_t *));

        /* min value for which this works */

#ifndef DJGPP
        if (buf_cacheType == CM_BUF_CACHETYPE_FILE) {
            /* Reserve buffer space by mapping cache file */
            psa = CreateCacheFileSA();
            hf = CreateFile(cm_CachePath,
                            GENERIC_READ | GENERIC_WRITE,
                            FILE_SHARE_READ | FILE_SHARE_WRITE,
                            psa,
                            OPEN_ALWAYS,
                            FILE_ATTRIBUTE_NORMAL,
                            NULL);
            FreeCacheFileSA(psa);
            if (hf == INVALID_HANDLE_VALUE) {
                afsi_log("Error creating cache file \"%s\" error %d",
                         cm_CachePath, GetLastError());
                return CM_ERROR_INVAL;
            }
        } else { /* buf_cacheType == CM_BUF_CACHETYPE_VIRTUAL */
            hf = INVALID_HANDLE_VALUE;
        }

        hm = CreateFileMapping(hf, NULL, PAGE_READWRITE,
                               0, buf_nbuffers * buf_bufferSize, NULL);
        if (hm == NULL) {
            if (GetLastError() == ERROR_DISK_FULL) {
                afsi_log("Error creating cache file \"%s\" mapping: disk full",
                         cm_CachePath);
                return CM_ERROR_TOOMANYBUFS;
            }
            return CM_ERROR_INVAL;
        }
        data = MapViewOfFile(hm, FILE_MAP_ALL_ACCESS, 0, 0,
                             buf_nbuffers * buf_bufferSize);
        if (data == NULL) {
            if (hf != INVALID_HANDLE_VALUE)
                CloseHandle(hf);
            CloseHandle(hm);
            return CM_ERROR_INVAL;
        }
        CloseHandle(hm);
#else
        /* djgpp doesn't support memory mapped files */
        data = malloc(buf_nbuffers * buf_bufferSize);
#endif /* !DJGPP */

        /* create buffer headers and put in free list */
        bp = malloc(buf_nbuffers * sizeof(cm_buf_t));
        buf_allp = NULL;
        for(i=0; i<buf_nbuffers; i++) {
            /* allocate and zero some storage */
            memset(bp, 0, sizeof(cm_buf_t));

            /* thread on list of all buffers */
            bp->allp = buf_allp;
            buf_allp = bp;

            osi_QAdd((osi_queue_t **)&buf_freeListp, &bp->q);
            bp->flags |= CM_BUF_INLRU;
            lock_InitializeMutex(&bp->mx, "Buffer mutex");

            /* grab appropriate number of bytes from aligned zone */
            bp->datap = data;

            /* setup last buffer pointer */
            if (i == 0)
                buf_freeListEndp = bp;

            /* next buffer */
            bp++;
            data += buf_bufferSize;
        }

        /* none reserved at first */
        buf_reservedBufs = 0;

        /* just for safety's sake */
        buf_maxReservedBufs = buf_nbuffers - 3;

        /* init the buffer trace log */
        buf_logp = osi_LogCreate("buffer", 1000);
        osi_LogEnable(buf_logp);

        osi_EndOnce(&once);

        /* and create the incr-syncer */
        phandle = thrd_Create(0, 0,
                              (ThreadFunc) buf_IncrSyncer, 0, 0, &pid,
                              "buf_IncrSyncer");

        osi_assertx(phandle != NULL, "buf: can't create incremental sync proc");
        CloseHandle(phandle);
    }

    return 0;
}

/* add nbuffers to the buffer pool, if possible.
 * Called with no locks held.
 */
long buf_AddBuffers(long nbuffers)
{
    cm_buf_t *bp;
    long i;
    char *data;
#ifndef DJGPP
    HANDLE hm;
    long cs;
    SYSTEM_INFO sysInfo;
#endif /* !DJGPP */

    afsi_log("%d buffers being added to the existing cache of size %d",
             nbuffers, buf_nbuffers);

    if (buf_cacheType == CM_BUF_CACHETYPE_VIRTUAL) {
        /* The size of a virtual cache cannot be changed after it has
         * been created.  Subsequent calls to MapViewOfFile() with
         * an existing mapping object name would not allow the
         * object to be resized.  Return failure immediately.
         */
        return CM_ERROR_INVAL;
    }

#ifndef DJGPP
    /*
     * Cache file mapping constrained by
     * system allocation granularity;
     * round up, assuming granularity is a power of two;
     * assume existing cache size is already rounded
     */
    GetSystemInfo(&sysInfo);
    cs = nbuffers * buf_bufferSize;
    cs = (cs + (sysInfo.dwAllocationGranularity - 1))
         & ~(sysInfo.dwAllocationGranularity - 1);
    if (cs != nbuffers * buf_bufferSize)
        nbuffers = cs / buf_bufferSize;

    /* Reserve additional buffer space by remapping cache file */
    hm = CreateFileMapping(CacheHandle, NULL, PAGE_READWRITE,
                           0, (buf_nbuffers + nbuffers) * buf_bufferSize,
                           NULL);
    if (hm == NULL) {
        if (GetLastError() == ERROR_DISK_FULL)
            return CM_ERROR_TOOMANYBUFS;
        else
            return CM_ERROR_INVAL;
    }
    data = MapViewOfFile(hm, FILE_MAP_ALL_ACCESS,
                         0, buf_nbuffers * buf_bufferSize,
                         nbuffers * buf_bufferSize);
    if (data == NULL) {
        CloseHandle(hm);
        return CM_ERROR_INVAL;
    }
    CloseHandle(hm);
#else
    /* djgpp: no memory-mapped files; space for just the new buffers */
    data = malloc(nbuffers * buf_bufferSize);
#endif /* !DJGPP */

    /* Create buffer headers and put in free list */
    bp = malloc(nbuffers * sizeof(*bp));

    for(i=0; i<nbuffers; i++) {
        memset(bp, 0, sizeof(*bp));

        lock_InitializeMutex(&bp->mx, "cm_buf_t");

        /* grab appropriate number of bytes from aligned zone */
        bp->datap = data;

        bp->flags |= CM_BUF_INLRU;

        lock_ObtainWrite(&buf_globalLock);
        /* note that buf_allp chain is covered by buf_globalLock now */
        bp->allp = buf_allp;
        buf_allp = bp;
        osi_QAdd((osi_queue_t **) &buf_freeListp, &bp->q);
        if (!buf_freeListEndp) buf_freeListEndp = bp;
        lock_ReleaseWrite(&buf_globalLock);

        /* next buffer */
        bp++;
        data += buf_bufferSize;
    } /* for loop over all buffers */

    /* and record the new number of buffers */
    buf_nbuffers += nbuffers;

    return 0;
}

/* interface to set the number of buffers to an exact figure.
 * Called with no locks held.
 */
long buf_SetNBuffers(long nbuffers)
{
    if (nbuffers < 10)
        return CM_ERROR_INVAL;
    if (nbuffers == buf_nbuffers)
        return 0;
    else if (nbuffers > buf_nbuffers)
        return buf_AddBuffers(nbuffers - buf_nbuffers);
    else
        return CM_ERROR_INVAL;
}

/* release a buffer.  Buffer must be referenced, but unlocked. */
void buf_Release(cm_buf_t *bp)
{
    lock_ObtainWrite(&buf_globalLock);
    buf_LockedRelease(bp);
    lock_ReleaseWrite(&buf_globalLock);
}

/* wait for reading or writing to clear; called with write-locked
 * buffer, and returns with locked buffer.
 */
void buf_WaitIO(cm_buf_t *bp)
{
    while (1) {
        /* if no IO is happening, we're done */
        if (!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)))
            break;

        /* otherwise I/O is happening, but some other thread is waiting for
         * the I/O already.  Wait for that guy to figure out what happened,
         * and then check again.
         */
        if (bp->flags & CM_BUF_WAITING)
            osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING already set for 0x%x", bp);

        bp->flags |= CM_BUF_WAITING;
        osi_SleepM((long) bp, &bp->mx);
        lock_ObtainMutex(&bp->mx);
        osi_Log1(buf_logp, "buf_WaitIO conflict wait done for 0x%x", bp);
    }

    /* if we get here, the IO is done, but we may have to wakeup people waiting for
     * the I/O to complete.  Do so.
     */
    if (bp->flags & CM_BUF_WAITING) {
        bp->flags &= ~CM_BUF_WAITING;
        osi_Wakeup((long) bp);
    }
    osi_Log1(buf_logp, "WaitIO finished wait for bp 0x%x", (long) bp);
}

/* code to drop reference count while holding buf_globalLock */
void buf_LockedRelease(cm_buf_t *bp)
{
    /* ensure that we're in the LRU queue if our ref count is 0 */
    osi_assert(bp->refCount > 0);
    if (--bp->refCount == 0) {
        if (!(bp->flags & CM_BUF_INLRU)) {
            osi_QAdd((osi_queue_t **) &buf_freeListp, &bp->q);

            /* watch for transition from empty to one element */
            if (!buf_freeListEndp)
                buf_freeListEndp = buf_freeListp;
            bp->flags |= CM_BUF_INLRU;
        }
    }
}

/* find a buffer, if any, for a particular file ID and offset.  Assumes
 * that buf_globalLock is write locked when called.
 */
cm_buf_t *buf_LockedFind(struct cm_scache *scp, osi_hyper_t *offsetp)
{
    long i;
    cm_buf_t *bp;

    i = BUF_HASH(&scp->fid, offsetp);
    for(bp = buf_hashTablepp[i]; bp; bp=bp->hashp) {
        if (cm_FidCmp(&scp->fid, &bp->fid) == 0
            && offsetp->LowPart == bp->offset.LowPart
            && offsetp->HighPart == bp->offset.HighPart) {
            bp->refCount++;
            break;
        }
    }

    /* return whatever we found, if anything */
    return bp;
}

/* find a buffer with offset *offsetp for vnode *scp.  Called
 * with no locks held.
 */
cm_buf_t *buf_Find(struct cm_scache *scp, osi_hyper_t *offsetp)
{
    cm_buf_t *bp;

    lock_ObtainWrite(&buf_globalLock);
    bp = buf_LockedFind(scp, offsetp);
    lock_ReleaseWrite(&buf_globalLock);

    return bp;
}
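
/* Illustrative sketch (not part of the module): a caller that probes the
 * cache for a page and drops its reference when done.  The helper name is
 * hypothetical.
 */
#if 0
static void example_Probe(cm_scache_t *scp, osi_hyper_t *offsetp)
{
    cm_buf_t *bufp;

    bufp = buf_Find(scp, offsetp);      /* returned held if found */
    if (bufp) {
        lock_ObtainMutex(&bufp->mx);    /* lock before touching data/flags */
        /* ... inspect bufp->datap, bufp->dataVersion ... */
        lock_ReleaseMutex(&bufp->mx);
        buf_Release(bufp);              /* drop the reference from buf_Find */
    }
}
#endif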

/* start cleaning I/O on this buffer.  Buffer must be write locked, and is
 * returned locked as well.
 *
 * Makes sure that there's only one person writing this block
 * at any given time, and also ensures that the log is forced sufficiently far,
 * if this buffer contains logged data.
 */
void buf_LockedCleanAsync(cm_buf_t *bp, cm_req_t *reqp)
{
    long code;

    code = 0;
    while ((bp->flags & (CM_BUF_WRITING | CM_BUF_DIRTY)) == CM_BUF_DIRTY) {
        lock_ReleaseMutex(&bp->mx);

        code = (*cm_buf_opsp->Writep)(&bp->fid, &bp->offset,
                                      buf_bufferSize, 0, bp->userp,
                                      reqp);

        lock_ObtainMutex(&bp->mx);
        if (code) break;

#ifdef DISKCACHE95
        /* Disk cache support */
        /* write buffer to disk cache (synchronous for now) */
        diskcache_Update(bp->dcp, bp->datap, buf_bufferSize, bp->dataVersion);
#endif /* DISKCACHE95 */
    }

    /* do logging after call to GetLastError, or else the log call will
     * overwrite the error code.
     */
    osi_Log2(buf_logp, "buf_CleanAsync starts I/O on 0x%x, done=%d", bp, code);

    /* if someone was waiting for the I/O that just completed or failed,
     * wake them up.
     */
    if (bp->flags & CM_BUF_WAITING) {
        /* turn off flags and wakeup users */
        bp->flags &= ~CM_BUF_WAITING;
        osi_Wakeup((long) bp);
    }
}

/* Called with a zero-ref count buffer and with the buf_globalLock write locked.
 * Recycles the buffer, and leaves it ready for reuse with a ref count of 1.
 * The buffer must already be clean, and no I/O should be happening to it.
 */
void buf_Recycle(cm_buf_t *bp)
{
    int i;
    cm_buf_t **lbpp;
    cm_buf_t *tbp;
    cm_buf_t *prevBp, *nextBp;

    /* if we get here, we know that the buffer still has a 0 ref count,
     * and that it is clean and has no currently pending I/O.  This is
     * the dude to return.
     * Remember that as long as the ref count is 0, we know that we won't
     * have any lock conflicts, so we can grab the buffer lock out of
     * order in the locking hierarchy.
     */
    osi_Log2( buf_logp, "buf_Recycle recycles 0x%x, off 0x%x",
              bp, bp->offset.LowPart);

    osi_assert(bp->refCount == 0);
    osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY)));
    lock_AssertWrite(&buf_globalLock);

    if (bp->flags & CM_BUF_INHASH) {
        /* Remove from hash */

        i = BUF_HASH(&bp->fid, &bp->offset);
        lbpp = &(buf_hashTablepp[i]);
        for(tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = *lbpp) {
            if (tbp == bp) break;
        }

        /* we better find it */
        osi_assertx(tbp != NULL, "buf_GetNewLocked: hash table screwup");

        *lbpp = bp->hashp;	/* hash out */

        /* Remove from file hash */

        i = BUF_FILEHASH(&bp->fid);
        prevBp = bp->fileHashBackp;
        nextBp = bp->fileHashp;
        if (prevBp)
            prevBp->fileHashp = nextBp;
        else
            buf_fileHashTablepp[i] = nextBp;
        if (nextBp)
            nextBp->fileHashBackp = prevBp;

        bp->flags &= ~CM_BUF_INHASH;
    }

    /* bump the soft reference counter now, to invalidate softRefs; no
     * wakeup is required since people don't sleep waiting for this
     * counter to change.
     */
    bp->idCounter++;

    /* make the fid unrecognizable */
    memset(&bp->fid, 0, sizeof(bp->fid));
}

/* recycle a buffer, removing it from the free list, hashing in its new identity
 * and returning it write-locked so that no one can use it.  Called without
 * any locks held, and can return an error if it loses the race condition and
 * finds that someone else created the desired buffer.
 *
 * If success is returned, the buffer is returned write-locked.
 *
 * May be called with null scp and offsetp, if we're just trying to reclaim some
 * space from the buffer pool.  In that case, the buffer will be returned
 * without being hashed into the hash table.
 */
long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
    cm_buf_t *bp;     /* buffer we're dealing with */
    cm_buf_t *nextBp; /* next buffer in file hash chain */
    long i;           /* temp */
    cm_req_t req;

    cm_InitReq(&req);	/* just in case */

    while(1) {
retry:
        lock_ObtainWrite(&buf_globalLock);
        /* check to see if we lost the race */
        if (scp) {
            if (bp = buf_LockedFind(scp, offsetp)) {
                bp->refCount--;	/* drop the reference buf_LockedFind took */
                lock_ReleaseWrite(&buf_globalLock);
                return CM_BUF_EXISTS;
            }
        }

        /* for debugging, assert free list isn't empty, although we
         * really should try waiting for a running transaction to finish
         * instead of this; or better, we should have a transaction
         * throttler prevent us from entering this situation.
         */
        osi_assertx(buf_freeListEndp != NULL, "buf_GetNewLocked: no free buffers");

        /* look at all buffers in free list, some of which may temp.
         * have high refcounts and which then should be skipped,
         * starting cleaning I/O for those which are dirty.  If we find
         * a clean buffer, we rehash it, lock it and return it.
         */
        for(bp = buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
            /* check to see if it really has zero ref count.  This
             * code can bump refcounts, at least, so it may not be
             * zero.
             */
            if (bp->refCount > 0)
                continue;

            /* we don't have to lock buffer itself, since the ref
             * count is 0 and we know it will stay zero as long as
             * we hold the global lock.
             */

            /* don't recycle someone in our own chunk */
            if (scp && !cm_FidCmp(&bp->fid, &scp->fid)
                && (bp->offset.LowPart & (-cm_chunkSize))
                   == (offsetp->LowPart & (-cm_chunkSize)))
                continue;
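            /* Worked example of the masking above: cm_chunkSize is a power
             * of two, so in two's complement (-cm_chunkSize) is a mask that
             * keeps only the high-order bits.  With a hypothetical 32 KiB
             * chunk size, -32768 == 0xFFFF8000, so offsets 0x8123 and
             * 0xA000 both mask to the same chunk base, 0x8000.
             */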

            /* if this page is being filled (!) or cleaned, see if
             * the I/O has completed.  If not, skip it, otherwise
             * do the final processing for the I/O.
             */
            if (bp->flags & (CM_BUF_READING | CM_BUF_WRITING)) {
                /* probably shouldn't do this much work while
                 * holding the big lock?  Watch for contention
                 * here.
                 */
                continue;
            }

            if (bp->flags & CM_BUF_DIRTY) {
                /* if the buffer is dirty, start cleaning it and
                 * move on to the next buffer.  We do this with
                 * just the lock required to minimize contention
                 * on the big lock.
                 */
                bp->refCount++;
                lock_ReleaseWrite(&buf_globalLock);

                /* grab required lock and clean; this only
                 * starts the I/O.  By the time we're back,
                 * it'll still be marked dirty, but it will also
                 * have the WRITING flag set, so we won't get
                 * the same buffer the next time around.
                 */
                buf_CleanAsync(bp, &req);

                /* now put it back and go around again */
                buf_Release(bp);
                goto retry;
            }

            /* if we get here, we know that the buffer still has a 0
             * ref count, and that it is clean and has no currently
             * pending I/O.  This is the dude to return.
             * Remember that as long as the ref count is 0, we know
             * that we won't have any lock conflicts, so we can grab
             * the buffer lock out of order in the locking hierarchy.
             */
            buf_Recycle(bp);

            /* clean up junk flags */
            bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR);
            bp->dataVersion = -1;	/* unknown so far */

            /* now hash in as our new buffer, and give it the
             * appropriate label, if requested.
             */
            if (scp) {
                bp->flags |= CM_BUF_INHASH;
                bp->fid = scp->fid;
                bp->offset = *offsetp;
                i = BUF_HASH(&scp->fid, offsetp);
                bp->hashp = buf_hashTablepp[i];
                buf_hashTablepp[i] = bp;
                i = BUF_FILEHASH(&scp->fid);
                nextBp = buf_fileHashTablepp[i];
                bp->fileHashp = nextBp;
                bp->fileHashBackp = NULL;
                if (nextBp)
                    nextBp->fileHashBackp = bp;
                buf_fileHashTablepp[i] = bp;
            }

            /* prepare to return it.  Start by giving it a good
             * refcount.
             */
            bp->refCount = 1;

            /* and since it has a non-zero ref count, we should move
             * it from the lru queue.  It better be still there,
             * since we've held the global (big) lock since we found
             * it there.
             */
            osi_assertx(bp->flags & CM_BUF_INLRU,
                        "buf_GetNewLocked: LRU screwup");
            if (buf_freeListEndp == bp) {
                /* we're the last guy in this queue, so maintain it */
                buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
            }
            osi_QRemove((osi_queue_t **) &buf_freeListp, &bp->q);
            bp->flags &= ~CM_BUF_INLRU;

            /* finally, grab the mutex so that people don't use it
             * before the caller fills it with data.  Again, no one
             * should have been able to get to this dude to lock it.
             */
            osi_assertx(lock_TryMutex(&bp->mx),
                        "buf_GetNewLocked: TryMutex failed");

            lock_ReleaseWrite(&buf_globalLock);
            *bufpp = bp;
            return 0;
        } /* for all buffers in lru queue */
        lock_ReleaseWrite(&buf_globalLock);
    } /* while loop over everything */
    /* not reached */
}

/* get a page, returning it held but unlocked.  Doesn't fill in the page
 * with I/O, since we're going to write the whole thing new.
 */
long buf_GetNew(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
    cm_buf_t *bp;
    long code;
    osi_hyper_t pageOffset;

    /* compute the buffer-aligned page offset */
    pageOffset.HighPart = offsetp->HighPart;
    pageOffset.LowPart = offsetp->LowPart & ~(buf_bufferSize-1);
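    /* Worked example of the mask above (hypothetical 4 KiB buffers):
     * buf_bufferSize-1 == 0xFFF, so ~0xFFF clears the low 12 bits and an
     * offset of 0x2345 yields pageOffset.LowPart == 0x2000, the start of
     * the enclosing page.
     */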
    while (1) {
        lock_ObtainWrite(&buf_globalLock);
        bp = buf_LockedFind(scp, &pageOffset);
        lock_ReleaseWrite(&buf_globalLock);
        if (bp) {
            /* lock it and break out */
            lock_ObtainMutex(&bp->mx);
            break;
        }

        /* otherwise, we have to create a page */
        code = buf_GetNewLocked(scp, &pageOffset, &bp);

        /* check if the buffer was created in a race condition branch.
         * If so, go around so we can hold a reference to it.
         */
        if (code == CM_BUF_EXISTS)
            continue;

        /* something else went wrong */
        if (code != 0)
            return code;

        /* otherwise, we have a locked buffer that we just created */
        break;
    } /* big while loop */

    if (bp->flags & CM_BUF_READING)
        buf_WaitIO(bp);

    /* once it has been read once, we can unlock it and return it, still
     * with its refcount held.
     */
    lock_ReleaseMutex(&bp->mx);
    *bufpp = bp;
    osi_Log3(buf_logp, "buf_GetNew returning bp 0x%x for file 0x%x, offset 0x%x",
             bp, (long) scp, offsetp->LowPart);
    return 0;
}
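
/* Illustrative sketch (not part of the module): filling a brand-new page.
 * buf_GetNew returns the buffer held but unlocked, so a writer relocks it
 * before touching datap.  The helper name and contents are hypothetical.
 */
#if 0
static long example_WriteFreshPage(cm_scache_t *scp, osi_hyper_t *offsetp)
{
    cm_buf_t *bufp;
    long code;

    code = buf_GetNew(scp, offsetp, &bufp);  /* held, unlocked on success */
    if (code) return code;

    lock_ObtainMutex(&bufp->mx);
    memset(bufp->datap, 0, buf_bufferSize);  /* ... generate new contents ... */
    buf_SetDirty(bufp);                      /* mark for the incr syncer */
    lock_ReleaseMutex(&bufp->mx);

    buf_Release(bufp);                       /* drop the reference */
    return 0;
}
#endif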

/* get a page, returning it held but unlocked.  Make sure it is complete */
long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
    cm_buf_t *bp;
    long code;
    osi_hyper_t pageOffset;
    unsigned long tcount;
    int created;
#ifdef DISKCACHE95
    cm_diskcache_t *dcp;
#endif /* DISKCACHE95 */

    created = 0;
    pageOffset.HighPart = offsetp->HighPart;
    pageOffset.LowPart = offsetp->LowPart & ~(buf_bufferSize-1);
    while (1) {
        lock_ObtainWrite(&buf_globalLock);
        bp = buf_LockedFind(scp, &pageOffset);
        lock_ReleaseWrite(&buf_globalLock);
        if (bp) {
            /* lock it and break out */
            lock_ObtainMutex(&bp->mx);

#ifdef DISKCACHE95
            /* touch disk chunk to update LRU info */
            diskcache_Touch(bp->dcp);
#endif /* DISKCACHE95 */
            break;
        }

        /* otherwise, we have to create a page */
        code = buf_GetNewLocked(scp, &pageOffset, &bp);

        /* check if the buffer was created in a race condition branch.
         * If so, go around so we can hold a reference to it.
         */
        if (code == CM_BUF_EXISTS)
            continue;

        /* something else went wrong */
        if (code != 0)
            return code;

        /* otherwise, we have a locked buffer that we just created */
        created = 1;
        break;
    } /* big while loop */

    /* if we get here, we have a locked buffer that may have just been
     * created, in which case it needs to be filled with data.
     */
    if (created) {
        /* load the page; freshly created pages should be idle */
        osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)));

        /* setup offset, event */
#ifndef DJGPP /* doesn't seem to be used */
        bp->over.Offset = bp->offset.LowPart;
        bp->over.OffsetHigh = bp->offset.HighPart;
#endif /* !DJGPP */

        /* start the I/O; may drop lock */
        bp->flags |= CM_BUF_READING;
        code = (*cm_buf_opsp->Readp)(bp, buf_bufferSize, &tcount, NULL);

#ifdef DISKCACHE95
        code = diskcache_Get(&bp->fid, &bp->offset, bp->datap, buf_bufferSize,
                             &bp->dataVersion, &tcount, &dcp);
        bp->dcp = dcp;	/* pointer to disk cache struct. */
#endif /* DISKCACHE95 */

        if (code != 0) {
            /* failure or queued */
#ifndef DJGPP /* cm_bufRead always returns 0 */
            if (code != ERROR_IO_PENDING) {
#endif
                bp->error = code;
                bp->flags |= CM_BUF_ERROR;
                bp->flags &= ~CM_BUF_READING;
                if (bp->flags & CM_BUF_WAITING) {
                    bp->flags &= ~CM_BUF_WAITING;
                    osi_Wakeup((long) bp);
                }
                lock_ReleaseMutex(&bp->mx);
                buf_Release(bp);
                return code;
#ifndef DJGPP
            }
#endif
        }

        /* otherwise, I/O completed instantly and we're done, except
         * for padding the xfr out with 0s and checking for EOF
         */
        if (tcount < (unsigned long) buf_bufferSize) {
            memset(bp->datap+tcount, 0, buf_bufferSize - tcount);
            if (tcount == 0)
                bp->flags |= CM_BUF_EOF;
        }
        bp->flags &= ~CM_BUF_READING;
        if (bp->flags & CM_BUF_WAITING) {
            bp->flags &= ~CM_BUF_WAITING;
            osi_Wakeup((long) bp);
        }
    } /* if created */

    /* wait for reads, either that which we started above, or that someone
     * else started.  We don't care if we return a buffer being cleaned.
     */
    if (bp->flags & CM_BUF_READING)
        buf_WaitIO(bp);

    /* once it has been read once, we can unlock it and return it, still
     * with its refcount held.
     */
    lock_ReleaseMutex(&bp->mx);
    *bufpp = bp;

    /* now remove from queue; will be put in at the head (farthest from
     * being recycled) when we're done in buf_Release.
     */
    lock_ObtainWrite(&buf_globalLock);
    if (bp->flags & CM_BUF_INLRU) {
        if (buf_freeListEndp == bp)
            buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
        osi_QRemove((osi_queue_t **) &buf_freeListp, &bp->q);
        bp->flags &= ~CM_BUF_INLRU;
    }
    lock_ReleaseWrite(&buf_globalLock);

    osi_Log3(buf_logp, "buf_Get returning bp 0x%x for file 0x%x, offset 0x%x",
             bp, (long) scp, offsetp->LowPart);
    return 0;
}

/* count # of elements in the free list;
 * we don't bother doing the proper locking for accessing dataVersion or flags
 * since it is a pain, and this is really just an advisory call.  If you need
 * to do better at some point, rewrite this function.
 */
long buf_CountFreeList(void)
{
    long count;
    cm_buf_t *bufp;

    count = 0;
    lock_ObtainRead(&buf_globalLock);
    for(bufp = buf_freeListp; bufp; bufp = (cm_buf_t *) osi_QNext(&bufp->q)) {
        /* if the buffer doesn't have an identity, or if the buffer
         * has been invalidated (by having its DV stomped upon), then
         * count it as free, since it isn't really being utilized.
         */
        if (!(bufp->flags & CM_BUF_INHASH) || bufp->dataVersion <= 0)
            count++;
    }
    lock_ReleaseRead(&buf_globalLock);
    return count;
}

/* start cleaning a buffer; the write is started here but not waited for */
void buf_CleanAsync(cm_buf_t *bp, cm_req_t *reqp)
{
    lock_ObtainMutex(&bp->mx);
    buf_LockedCleanAsync(bp, reqp);
    lock_ReleaseMutex(&bp->mx);
}

/* wait for a buffer's cleaning to finish */
void buf_CleanWait(cm_buf_t *bp)
{
    lock_ObtainMutex(&bp->mx);
    if (bp->flags & CM_BUF_WRITING) {
        buf_WaitIO(bp);
    }
    lock_ReleaseMutex(&bp->mx);
}

/* set the dirty flag on a buffer, and set associated write-ahead log,
 * if there is one.  Allow one to be added to a buffer, but not changed.
 *
 * The buffer must be locked before calling this routine.
 */
void buf_SetDirty(cm_buf_t *bp)
{
    osi_assert(bp->refCount > 0);

    osi_Log1(buf_logp, "buf_SetDirty 0x%x", bp);

    bp->flags |= CM_BUF_DIRTY;

    /* and turn off EOF flag, since it has associated data now */
    bp->flags &= ~CM_BUF_EOF;
}

/* clean all buffers, reset log pointers and invalidate all buffers.
 * Called with no locks held, and returns with same.
 *
 * This function is guaranteed to clean and remove the log ptr of all the
 * buffers that were dirty or had non-zero log ptrs before the call was
 * made.  That's sufficient to clean up any garbage left around by recovery,
 * which is all we're counting on this for; there may be newly created buffers
 * added while we're running, but that should be OK.
 *
 * In an environment where there are no transactions (artificially imposed, for
 * example, when switching the database to raw mode), this function is used to
 * make sure that all updates have been written to the disk.  In that case, we don't
 * really require that we forget the log association between pages and logs, but
 * it also doesn't hurt.  Since raw mode I/O goes through this buffer package, we don't
 * have to worry about invalidating data in the buffers.
 *
 * This function is used at the end of recovery as paranoia to get the recovered
 * database out to disk.  It removes all references to the recovery log and cleans
 * all buffers.
 */
long buf_CleanAndReset(void)
{
    long i;
    cm_buf_t *bp;
    cm_req_t req;

    lock_ObtainWrite(&buf_globalLock);
    for(i=0; i<buf_hashSize; i++) {
        for(bp = buf_hashTablepp[i]; bp; bp = bp->hashp) {
            bp->refCount++;
            lock_ReleaseWrite(&buf_globalLock);

            /* now no locks are held; clean buffer and go on */
            cm_InitReq(&req);
            buf_CleanAsync(bp, &req);
            buf_CleanWait(bp);

            /* relock and release buffer */
            lock_ObtainWrite(&buf_globalLock);
            buf_LockedRelease(bp);
        } /* over one bucket */
    } /* for loop over all hash buckets */

    lock_ReleaseWrite(&buf_globalLock);

    /* and we're done */
    return 0;
}

/* called without global lock being held, reserves buffers for callers
 * that need more than one held (not locked) at once.
 */
void buf_ReserveBuffers(long nbuffers)
{
    lock_ObtainWrite(&buf_globalLock);
    while (1) {
        if (buf_reservedBufs + nbuffers > buf_maxReservedBufs) {
            buf_reserveWaiting = 1;
            osi_Log1(buf_logp, "buf_ReserveBuffers waiting for %d bufs", nbuffers);
            osi_SleepW((long) &buf_reservedBufs, &buf_globalLock);
            lock_ObtainWrite(&buf_globalLock);
        }
        else {
            buf_reservedBufs += nbuffers;
            break;
        }
    }
    lock_ReleaseWrite(&buf_globalLock);
}

int buf_TryReserveBuffers(long nbuffers)
{
    int code;

    lock_ObtainWrite(&buf_globalLock);
    if (buf_reservedBufs + nbuffers > buf_maxReservedBufs)
        code = 0;
    else {
        buf_reservedBufs += nbuffers;
        code = 1;
    }
    lock_ReleaseWrite(&buf_globalLock);
    return code;
}

/* called without global lock held, releases reservation held by
 * buf_ReserveBuffers.
 */
void buf_UnreserveBuffers(long nbuffers)
{
    lock_ObtainWrite(&buf_globalLock);
    buf_reservedBufs -= nbuffers;
    if (buf_reserveWaiting) {
        buf_reserveWaiting = 0;
        osi_Wakeup((long) &buf_reservedBufs);
    }
    lock_ReleaseWrite(&buf_globalLock);
}
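
/* Illustrative sketch (not part of the module): a caller that needs several
 * buffers held at once brackets the work with a reservation so the pool
 * cannot be exhausted by concurrent multi-buffer users.  The helper name is
 * hypothetical.
 */
#if 0
static void example_TwoBufferOp(cm_scache_t *scp,
                                osi_hyper_t *off1, osi_hyper_t *off2)
{
    cm_buf_t *b1, *b2;

    buf_ReserveBuffers(2);              /* may sleep until 2 bufs are free */
    if (buf_Get(scp, off1, &b1) == 0) {
        if (buf_Get(scp, off2, &b2) == 0) {
            /* ... operate on b1->datap and b2->datap ... */
            buf_Release(b2);
        }
        buf_Release(b1);
    }
    buf_UnreserveBuffers(2);            /* wake any waiting reservers */
}
#endif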

/* truncate the buffers past sizep, zeroing out the page, if we don't
 * end on a page boundary.
 *
 * Requires cm_bufCreateLock to be write locked.
 */
long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
                  osi_hyper_t *sizep)
{
    cm_buf_t *bufp;
    cm_buf_t *nbufp;	/* next buffer, if didRelease */
    long code;
    long bufferPos;
    long i;
    osi_hyper_t bufEnd;

    code = 0;

    /* assert that cm_bufCreateLock is held in write mode */
    lock_AssertWrite(&scp->bufCreateLock);

    i = BUF_FILEHASH(&scp->fid);

    lock_ObtainWrite(&buf_globalLock);
    bufp = buf_fileHashTablepp[i];
    if (bufp == NULL) {
        lock_ReleaseWrite(&buf_globalLock);
        return 0;
    }

    bufp->refCount++;
    lock_ReleaseWrite(&buf_globalLock);
    for(; bufp; bufp = nbufp) {
        lock_ObtainMutex(&bufp->mx);

        bufEnd.HighPart = 0;
        bufEnd.LowPart = buf_bufferSize;
        bufEnd = LargeIntegerAdd(bufEnd, bufp->offset);

        if (cm_FidCmp(&bufp->fid, &scp->fid) == 0 &&
            LargeIntegerLessThan(*sizep, bufEnd)) {
            buf_WaitIO(bufp);
        }
        lock_ObtainMutex(&scp->mx);

        /* make sure we have a callback (so we have the right value for
         * the length), and wait for it to be safe to do a truncate.
         */
        code = cm_SyncOp(scp, bufp, userp, reqp, 0,
                         CM_SCACHESYNC_NEEDCALLBACK
                         | CM_SCACHESYNC_GETSTATUS
                         | CM_SCACHESYNC_SETSIZE
                         | CM_SCACHESYNC_BUFLOCKED);
1282 /* if we succeeded in our locking, and this applies to the right
1283 * file, and the truncate request overlaps the buffer either
1284 * totally or partially, then do something.
1286 if (code == 0 && cm_FidCmp(&bufp->fid, &scp->fid) == 0
1287 && LargeIntegerLessThan(*sizep, bufEnd)) {
1289 lock_ObtainWrite(&buf_globalLock);
1291 /* destroy the buffer, turning off its dirty bit, if
1292 * we're truncating the whole buffer. Otherwise, set
1293 * the dirty bit, and clear out the tail of the buffer
1294 * if we just overlap some.
1296 if (LargeIntegerLessThanOrEqualTo(*sizep, bufp->offset)) {
1297 /* truncating the entire page */
1298 bufp->flags &= ~CM_BUF_DIRTY;
1299 bufp->dataVersion = -1; /* known bad */
1300 bufp->dirtyCounter++;
1303 /* don't set dirty, since dirty implies
1304 * currently up-to-date. Don't need to do this,
1305 * since we'll update the length anyway.
1307 * Zero out remainder of the page, in case we
1308 * seek and write past EOF, and make this data
1311 bufferPos = sizep->LowPart & (buf_bufferSize - 1);
1312 osi_assert(bufferPos != 0);
1313 memset(bufp->datap + bufferPos, 0,
1314 buf_bufferSize - bufferPos);
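            /* Worked example (hypothetical 4 KiB buffers): truncating to a
             * size whose low bits are 0x1800 gives bufferPos = 0x1800 &
             * 0xFFF = 0x800, so bytes 0x800..0xFFF of the page are zeroed
             * while the first 0x800 bytes survive.
             */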

            lock_ReleaseWrite(&buf_globalLock);
        } /* if truncate overlaps this buffer */

        lock_ReleaseMutex(&scp->mx);
        lock_ReleaseMutex(&bufp->mx);

        lock_ObtainWrite(&buf_globalLock);
        nbufp = bufp->fileHashp;
        if (nbufp) nbufp->refCount++;
        buf_LockedRelease(bufp);
        lock_ReleaseWrite(&buf_globalLock);

        /* bail out early if we fail */
        if (code) {
            /* at this point, nbufp is held; bufp has already been
             * released.
             */
            if (nbufp) buf_Release(nbufp);
            return code;
        }
    } /* for loop over all hashed buffers for this file */

    return code;
}

long buf_FlushCleanPages(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
{
    long code;
    cm_buf_t *bp;   /* buffer we're hacking on */
    cm_buf_t *nbp;  /* next one */
    long i;
    int didRelease;

    code = 0;
    i = BUF_FILEHASH(&scp->fid);

    lock_ObtainWrite(&buf_globalLock);
    bp = buf_fileHashTablepp[i];
    if (bp) bp->refCount++;
    lock_ReleaseWrite(&buf_globalLock);
    for(; bp; bp = nbp) {
        didRelease = 0;	/* haven't released this buffer yet */

        /* clean buffer synchronously */
        if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
            lock_ObtainMutex(&bp->mx);

            /* start cleaning the buffer, and wait for it to finish */
            buf_LockedCleanAsync(bp, reqp);
            buf_WaitIO(bp);
            lock_ReleaseMutex(&bp->mx);

            code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
            if (code) goto skip;

            lock_ObtainWrite(&buf_globalLock);
            /* actually, we only know that buffer is clean if ref
             * count is 1, since we don't have buffer itself locked.
             */
            if (!(bp->flags & CM_BUF_DIRTY)) {
                if (bp->refCount == 1) {	/* bp is held above */
                    buf_LockedRelease(bp);
                    nbp = bp->fileHashp;
                    if (nbp) nbp->refCount++;
                    didRelease = 1;
                    buf_Recycle(bp);
                }
            }
            lock_ReleaseWrite(&buf_globalLock);

            (*cm_buf_opsp->Unstabilizep)(scp, userp);
        }

      skip:
        if (!didRelease) {
            lock_ObtainWrite(&buf_globalLock);
            if (nbp = bp->fileHashp) nbp->refCount++;
            buf_LockedRelease(bp);
            lock_ReleaseWrite(&buf_globalLock);
        }
    } /* for loop over a bunch of buffers */

    /* done */
    return code;
}

long buf_CleanVnode(struct cm_scache *scp, cm_user_t *userp, cm_req_t *reqp)
{
    long code;
    cm_buf_t *bp;   /* buffer we're hacking on */
    cm_buf_t *nbp;  /* next one */
    long i;

    code = 0;
    i = BUF_FILEHASH(&scp->fid);

    lock_ObtainWrite(&buf_globalLock);
    bp = buf_fileHashTablepp[i];
    if (bp) bp->refCount++;
    lock_ReleaseWrite(&buf_globalLock);
    for(; bp; bp = nbp) {
        /* clean buffer synchronously */
        if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
            /* tag the buffer with the caller's credentials for the writeback */
            if (userp) {
                cm_HoldUser(userp);
                lock_ObtainMutex(&bp->mx);
                if (bp->userp)
                    cm_ReleaseUser(bp->userp);
                bp->userp = userp;
                lock_ReleaseMutex(&bp->mx);
            }

            buf_CleanAsync(bp, reqp);
            buf_CleanWait(bp);

            /* if the buffer ended up in error, remember the first error code */
            lock_ObtainMutex(&bp->mx);
            if (bp->flags & CM_BUF_ERROR) {
                if (code == 0 || code == -1)
                    code = bp->error;
            }
            lock_ReleaseMutex(&bp->mx);
        }

        lock_ObtainWrite(&buf_globalLock);
        buf_LockedRelease(bp);
        nbp = bp->fileHashp;
        if (nbp) nbp->refCount++;
        lock_ReleaseWrite(&buf_globalLock);
    } /* for loop over a bunch of buffers */

    /* done */
    return code;
}

/* dump the contents of the buf_hashTablepp. */
int cm_DumpBufHashTable(FILE *outputFile, char *cookie)
{
    int zilch;
    cm_buf_t *bp;
    char output[1024];
    int i;

    if (buf_hashTablepp == NULL)
        return -1;

    lock_ObtainRead(&buf_globalLock);

    sprintf(output, "%s - dumping buf_HashTable - buf_hashSize=%d\n", cookie, buf_hashSize);
    WriteFile(outputFile, output, strlen(output), &zilch, NULL);

    for (i = 0; i < buf_hashSize; i++)
    {
        for(bp = buf_hashTablepp[i]; bp; bp=bp->hashp)
        {
            sprintf(output, "%s bp=0x%08X, hash=%d, fid (cell=%d, volume=%d, "
                    "vnode=%d, unique=%d), size=%d refCount=%d\n",
                    cookie, (void *)bp, i, bp->fid.cell, bp->fid.volume,
                    bp->fid.vnode, bp->fid.unique, bp->size, bp->refCount);
            WriteFile(outputFile, output, strlen(output), &zilch, NULL);
        }
    }

    sprintf(output, "%s - Done dumping buf_HashTable.\n", cookie);
    WriteFile(outputFile, output, strlen(output), &zilch, NULL);

    lock_ReleaseRead(&buf_globalLock);
    return 0;
}

void buf_ForceTrace(BOOL flush)
{
    HANDLE handle;
    int len;
    char buf[256];

    if (!buf_logp)
        return;

    len = GetTempPath(sizeof(buf)-10, buf);
    StringCbCopyA(&buf[len], sizeof(buf)-len, "/afs-buffer.log");
    handle = CreateFile(buf, GENERIC_WRITE, FILE_SHARE_READ,
                        NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
    if (handle == INVALID_HANDLE_VALUE) {
        osi_panic("Cannot create log file", __FILE__, __LINE__);
    }
    osi_LogPrint(buf_logp, handle);
    if (flush)
        FlushFileBuffers(handle);
    CloseHandle(handle);
}