#include <windows.h>
#endif
#include <osi.h>
-#include <malloc.h>
#include <stdio.h>
#include <assert.h>
+#include <strsafe.h>
#include "afsd.h"
+#include "cm_memmap.h"
+
+#ifdef DEBUG
+#define TRACE_BUFFER 1
+#endif
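+/* TRACE_BUFFER turns on the buffer trace log (buf_logp, created in
+ * buf_Init below) so buffer state transitions can be traced in
+ * debug builds
+ */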
extern void afsi_log(char *pattern, ...);
* to put stuff in buf_freeListp, and maintain the end
* pointer manually
*/
-cm_buf_t *buf_freeListp;
-cm_buf_t *buf_freeListEndp;
-/* a pointer to a list of all buffers, just so that we can find them
- * easily for debugging, and for the incr syncer. Locked under
- * the global lock.
- */
-cm_buf_t *buf_allp;
-/* defaults setup; these variables may be manually assigned into
- * before calling cm_Init, as a way of changing these defaults.
- */
-long buf_nbuffers = CM_BUF_BUFFERS;
-long buf_nOrigBuffers;
-long buf_bufferSize = CM_BUF_SIZE;
-long buf_hashSize = CM_BUF_HASHSIZE;
-
-#ifndef DJGPP
-static
-HANDLE CacheHandle;
-
-static
-SYSTEM_INFO sysInfo;
-#endif /* !DJGPP */
-
-/* buffer reservation variables */
-long buf_reservedBufs;
-long buf_maxReservedBufs;
-int buf_reserveWaiting;
/* callouts for reading and writing data, etc */
cm_buf_ops_t *cm_buf_opsp;
-/* pointer to hash table; size computed dynamically */
-cm_buf_t **buf_hashTablepp;
-
-/* another hash table */
-cm_buf_t **buf_fileHashTablepp;
-
#ifdef DISKCACHE95
/* for experimental disk caching support in Win95 client */
cm_buf_t *buf_diskFreeListp;
extern int cm_diskCacheEnabled;
#endif /* DISKCACHE95 */
+/* set this to 1 when we are terminating to prevent access attempts */
+static int buf_ShutdownFlag = 0;
+
/* hold a reference to an already held buffer */
void buf_Hold(cm_buf_t *bp)
{
- lock_ObtainWrite(&buf_globalLock);
- bp->refCount++;
- lock_ReleaseWrite(&buf_globalLock);
+ osi_assert(bp->magic == CM_BUF_MAGIC);
+ lock_ObtainWrite(&buf_globalLock);
+ bp->refCount++;
+ lock_ReleaseWrite(&buf_globalLock);
}
/* incremental sync daemon. Writes 1/10th of all the buffers every 5000 ms */
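+/* (for example, with 10,000 buffers each pass cleans 1,000 of them, so the
+ *  whole cache is revisited roughly every 50 seconds)
+ */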
void buf_IncrSyncer(long parm)
{
- cm_buf_t *bp; /* buffer we're hacking on; held */
- long i; /* counter */
- long nAtOnce; /* how many to do at once */
- cm_req_t req;
-
- lock_ObtainWrite(&buf_globalLock);
- bp = buf_allp;
- bp->refCount++;
- lock_ReleaseWrite(&buf_globalLock);
- nAtOnce = buf_nbuffers / 10;
- while (1) {
+ cm_buf_t *bp; /* buffer we're hacking on; held */
+ long i; /* counter */
+ long nAtOnce; /* how many to do at once */
+ cm_req_t req;
+
+ lock_ObtainWrite(&buf_globalLock);
+ bp = cm_data.buf_allp;
+ bp->refCount++;
+ lock_ReleaseWrite(&buf_globalLock);
+ nAtOnce = cm_data.buf_nbuffers / 10;
+ while (buf_ShutdownFlag == 0) {
#ifndef DJGPP
- i = SleepEx(5000, 1);
- if (i != 0) continue;
+ i = SleepEx(5000, 1);
+ if (i != 0) continue;
#else
- thrd_Sleep(5000);
+ thrd_Sleep(5000);
#endif /* DJGPP */
-
- /* now go through our percentage of the buffers */
- for(i=0; i<nAtOnce; i++) {
- /* don't want its identity changing while we're
- * messing with it, so must do all of this with
- * bp held.
- */
-
- /* start cleaning the buffer; don't touch log pages since
- * the log code counts on knowing exactly who is writing
- * a log page at any given instant.
- */
- cm_InitReq(&req);
- req.flags |= CM_REQ_NORETRY;
- buf_CleanAsync(bp, &req);
-
- /* now advance to the next buffer; the allp chain never changes,
- * and so can be followed even when holding no locks.
- */
- lock_ObtainWrite(&buf_globalLock);
- buf_LockedRelease(bp);
- bp = bp->allp;
- if (!bp) bp = buf_allp;
- bp->refCount++;
- lock_ReleaseWrite(&buf_globalLock);
- } /* for loop over a bunch of buffers */
- } /* whole daemon's while loop */
+
+ if (buf_ShutdownFlag == 1)
+ return;
+
+ /* now go through our percentage of the buffers */
+ for (i=0; i<nAtOnce; i++) {
+ /* don't want its identity changing while we're
+ * messing with it, so must do all of this with
+ * bp held.
+ */
+
+ /* start cleaning the buffer; don't touch log pages since
+ * the log code counts on knowing exactly who is writing
+ * a log page at any given instant.
+ */
+ cm_InitReq(&req);
+ req.flags |= CM_REQ_NORETRY;
+ buf_CleanAsync(bp, &req);
+
+ /* now advance to the next buffer; the allp chain never changes,
+ * and so can be followed even when holding no locks.
+ */
+ lock_ObtainWrite(&buf_globalLock);
+ buf_LockedRelease(bp);
+ bp = bp->allp;
+ if (!bp)
+ bp = cm_data.buf_allp;
+ bp->refCount++;
+ lock_ReleaseWrite(&buf_globalLock);
+ } /* for loop over a bunch of buffers */
+ } /* whole daemon's while loop */
}
-#ifndef DJGPP
-/* Create a security attribute structure suitable for use when the cache file
- * is created. What we mainly want is that only the administrator should be
- * able to do anything with the file. We create an ACL with only one entry,
- * an entry that grants all rights to the administrator.
- */
-PSECURITY_ATTRIBUTES CreateCacheFileSA()
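+/* Consistency check over the buffer package: walk the free list from both
+ * ends and the allp chain, verifying each header's magic value and that no
+ * chain is longer than buf_nbuffers; also cross-check the two free list
+ * counts against each other and the total against buf_nbuffers.  Returns 0
+ * on success, or a distinct negative value identifying the failed check.
+ */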
+long
+buf_ValidateBuffers(void)
{
- PSECURITY_ATTRIBUTES psa;
- PSECURITY_DESCRIPTOR psd;
- SID_IDENTIFIER_AUTHORITY authority = SECURITY_NT_AUTHORITY;
- PSID AdminSID;
- DWORD AdminSIDlength;
- PACL AdminOnlyACL;
- DWORD ACLlength;
-
- /* Get Administrator SID */
- AllocateAndInitializeSid(&authority, 2,
- SECURITY_BUILTIN_DOMAIN_RID,
- DOMAIN_ALIAS_RID_ADMINS,
- 0, 0, 0, 0, 0, 0,
- &AdminSID);
-
- /* Create Administrator-only ACL */
- AdminSIDlength = GetLengthSid(AdminSID);
- ACLlength = sizeof(ACL) + sizeof(ACCESS_ALLOWED_ACE)
- + AdminSIDlength - sizeof(DWORD);
- AdminOnlyACL = GlobalAlloc(GMEM_FIXED, ACLlength);
- InitializeAcl(AdminOnlyACL, ACLlength, ACL_REVISION);
- AddAccessAllowedAce(AdminOnlyACL, ACL_REVISION,
- STANDARD_RIGHTS_ALL | SPECIFIC_RIGHTS_ALL,
- AdminSID);
-
- /* Create security descriptor */
- psd = GlobalAlloc(GMEM_FIXED, sizeof(SECURITY_DESCRIPTOR));
- InitializeSecurityDescriptor(psd, SECURITY_DESCRIPTOR_REVISION);
- SetSecurityDescriptorDacl(psd, TRUE, AdminOnlyACL, FALSE);
-
- /* Create security attributes structure */
- psa = GlobalAlloc(GMEM_FIXED, sizeof(SECURITY_ATTRIBUTES));
- psa->nLength = sizeof(SECURITY_ATTRIBUTES);
- psa->lpSecurityDescriptor = psd;
- psa->bInheritHandle = TRUE;
-
- return psa;
-}
-#endif /* !DJGPP */
+ cm_buf_t * bp, *bpf, *bpa, *bpb;
+ afs_uint32 countb = 0, countf = 0, counta = 0;
+
+ for (bp = cm_data.buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
+ if (bp->magic != CM_BUF_MAGIC) {
+            afsi_log("buf_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC");
+            fprintf(stderr, "buf_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC\n");
+ return -1;
+ }
+ countb++;
+ bpb = bp;
-#ifndef DJGPP
-/* Free a security attribute structure created by CreateCacheFileSA() */
-VOID FreeCacheFileSA(PSECURITY_ATTRIBUTES psa)
-{
- BOOL b1, b2;
- PACL pAcl;
+ if (countb > cm_data.buf_nbuffers) {
+            afsi_log("buf_ValidateBuffers failure: countb > cm_data.buf_nbuffers");
+            fprintf(stderr, "buf_ValidateBuffers failure: countb > cm_data.buf_nbuffers\n");
+ return -6;
+ }
+ }
+
+ for (bp = cm_data.buf_freeListp; bp; bp=(cm_buf_t *) osi_QNext(&bp->q)) {
+ if (bp->magic != CM_BUF_MAGIC) {
+            afsi_log("buf_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC");
+            fprintf(stderr, "buf_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC\n");
+ return -2;
+ }
+ countf++;
+ bpf = bp;
+
+ if (countf > cm_data.buf_nbuffers) {
+            afsi_log("buf_ValidateBuffers failure: countf > cm_data.buf_nbuffers");
+            fprintf(stderr, "buf_ValidateBuffers failure: countf > cm_data.buf_nbuffers\n");
+ return -7;
+ }
+ }
+
+ for (bp = cm_data.buf_allp; bp; bp=bp->allp) {
+ if (bp->magic != CM_BUF_MAGIC) {
+            afsi_log("buf_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC");
+            fprintf(stderr, "buf_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC\n");
+ return -3;
+ }
+ counta++;
+ bpa = bp;
- GetSecurityDescriptorDacl(psa->lpSecurityDescriptor, &b1, &pAcl, &b2);
- GlobalFree(pAcl);
- GlobalFree(psa->lpSecurityDescriptor);
- GlobalFree(psa);
+ if (counta > cm_data.buf_nbuffers) {
+            afsi_log("buf_ValidateBuffers failure: counta > cm_data.buf_nbuffers");
+            fprintf(stderr, "buf_ValidateBuffers failure: counta > cm_data.buf_nbuffers\n");
+ return -8;
+ }
+ }
+
+ if (countb != countf) {
+        afsi_log("buf_ValidateBuffers failure: countb != countf");
+        fprintf(stderr, "buf_ValidateBuffers failure: countb != countf\n");
+ return -4;
+ }
+
+ if (counta != cm_data.buf_nbuffers) {
+        afsi_log("buf_ValidateBuffers failure: counta != cm_data.buf_nbuffers");
+        fprintf(stderr, "buf_ValidateBuffers failure: counta != cm_data.buf_nbuffers\n");
+ return -5;
+ }
+
+ return 0;
}
-#endif /* !DJGPP */
-
+
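+/* called at shutdown time; raising the flag stops buf_IncrSyncer on its
+ * next pass and prevents further access attempts
+ */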
+void buf_Shutdown(void)
+{
+ buf_ShutdownFlag = 1;
+}
+
/* initialize the buffer package; called with no locks
* held during the initialization phase.
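+ *
+ * When newFile is non-zero the cache file is being created from scratch,
+ * so the buffer headers and hash tables in the mapped region are built
+ * here; otherwise the persisted headers are reused and only the per-buffer
+ * mutexes and waiter state are reinitialized.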
*/
-long buf_Init(cm_buf_ops_t *opsp)
+long buf_Init(int newFile, cm_buf_ops_t *opsp, long nbuffers)
{
- static osi_once_t once;
- cm_buf_t *bp;
- long sectorSize;
- thread_t phandle;
-#ifndef DJGPP
- HANDLE hf, hm;
- PSECURITY_ATTRIBUTES psa;
-#endif /* !DJGPP */
- long i;
- unsigned long pid;
- char *data;
- long cs;
-
-#ifndef DJGPP
- /* Get system info; all we really want is the allocation granularity */
- GetSystemInfo(&sysInfo);
-#endif /* !DJGPP */
-
- /* Have to be able to reserve a whole chunk */
- if (((buf_nbuffers - 3) * buf_bufferSize) < cm_chunkSize)
- return CM_ERROR_TOOFEWBUFS;
-
- /* recall for callouts */
- cm_buf_opsp = opsp;
-
- if (osi_Once(&once)) {
- /* initialize global locks */
- lock_InitializeRWLock(&buf_globalLock, "Global buffer lock");
-
-#ifndef DJGPP
- /*
- * Cache file mapping constrained by
- * system allocation granularity;
- * round up, assuming granularity is a power of two
- */
- cs = buf_nbuffers * buf_bufferSize;
- cs = (cs + (sysInfo.dwAllocationGranularity - 1))
- & ~(sysInfo.dwAllocationGranularity - 1);
- if (cs != buf_nbuffers * buf_bufferSize) {
- buf_nbuffers = cs / buf_bufferSize;
- afsi_log("Cache size rounded up to %d buffers",
- buf_nbuffers);
- }
-#endif /* !DJGPP */
-
- /* remember this for those who want to reset it */
- buf_nOrigBuffers = buf_nbuffers;
-
- /* lower hash size to a prime number */
- buf_hashSize = osi_PrimeLessThan(buf_hashSize);
-
- /* create hash table */
- buf_hashTablepp = malloc(buf_hashSize * sizeof(cm_buf_t *));
- memset((void *)buf_hashTablepp, 0,
- buf_hashSize * sizeof(cm_buf_t *));
+ static osi_once_t once;
+ cm_buf_t *bp;
+ thread_t phandle;
+ long i;
+ unsigned long pid;
+ char *data;
+
+ if ( newFile ) {
+ if (nbuffers)
+ cm_data.buf_nbuffers = nbuffers;
+
+ /* Have to be able to reserve a whole chunk */
+ if (((cm_data.buf_nbuffers - 3) * cm_data.buf_blockSize) < cm_chunkSize)
+ return CM_ERROR_TOOFEWBUFS;
+ }
- /* another hash table */
- buf_fileHashTablepp = malloc(buf_hashSize * sizeof(cm_buf_t *));
- memset((void *)buf_fileHashTablepp, 0,
- buf_hashSize * sizeof(cm_buf_t *));
+ /* recall for callouts */
+ cm_buf_opsp = opsp;
+
+ if (osi_Once(&once)) {
+ /* initialize global locks */
+ lock_InitializeRWLock(&buf_globalLock, "Global buffer lock");
+
+ if ( newFile ) {
+ /* remember this for those who want to reset it */
+ cm_data.buf_nOrigBuffers = cm_data.buf_nbuffers;
+
+ /* lower hash size to a prime number */
+ cm_data.buf_hashSize = osi_PrimeLessThan(CM_BUF_HASHSIZE);
+
+ /* create hash table */
+ memset((void *)cm_data.buf_hashTablepp, 0, cm_data.buf_hashSize * sizeof(cm_buf_t *));
+
+ /* another hash table */
+ memset((void *)cm_data.buf_fileHashTablepp, 0, cm_data.buf_hashSize * sizeof(cm_buf_t *));
+
+ /* create buffer headers and put in free list */
+ bp = cm_data.bufHeaderBaseAddress;
+ data = cm_data.bufDataBaseAddress;
+ cm_data.buf_allp = NULL;
+
+ for (i=0; i<cm_data.buf_nbuffers; i++) {
+ osi_assert(bp >= cm_data.bufHeaderBaseAddress && bp < (cm_buf_t *)cm_data.bufDataBaseAddress);
+ osi_assert(data >= cm_data.bufDataBaseAddress && data < cm_data.bufEndOfData);
- /* min value for which this works */
- sectorSize = 1;
-
-#ifndef DJGPP
- /* Reserve buffer space by mapping cache file */
- psa = CreateCacheFileSA();
- hf = CreateFile(cm_CachePath,
- GENERIC_READ | GENERIC_WRITE,
- FILE_SHARE_READ | FILE_SHARE_WRITE,
- psa,
- OPEN_ALWAYS,
- FILE_ATTRIBUTE_NORMAL,
- NULL);
- if (hf == INVALID_HANDLE_VALUE) {
- afsi_log("create file error %d", GetLastError());
- return CM_ERROR_INVAL;
- }
- FreeCacheFileSA(psa);
- CacheHandle = hf;
- hm = CreateFileMapping(hf,
- NULL,
- PAGE_READWRITE,
- 0, buf_nbuffers * buf_bufferSize,
- NULL);
- if (hm == NULL) {
- if (GetLastError() == ERROR_DISK_FULL) {
- afsi_log("Error creating cache file mapping: disk full");
- return CM_ERROR_TOOMANYBUFS;
- }
- return CM_ERROR_INVAL;
- }
- data = MapViewOfFile(hm,
- FILE_MAP_ALL_ACCESS,
- 0, 0,
- buf_nbuffers * buf_bufferSize);
- if (data == NULL) {
- CloseHandle(hf);
- CloseHandle(hm);
- return CM_ERROR_INVAL;
- }
- CloseHandle(hm);
-#else
- /* djgpp doesn't support memory mapped files */
- data = malloc(buf_nbuffers * buf_bufferSize);
-#endif /* !DJGPP */
-
- /* create buffer headers and put in free list */
- bp = malloc(buf_nbuffers * sizeof(cm_buf_t));
- buf_allp = NULL;
- for(i=0; i<buf_nbuffers; i++) {
- /* allocate and zero some storage */
- memset(bp, 0, sizeof(cm_buf_t));
-
- /* thread on list of all buffers */
- bp->allp = buf_allp;
- buf_allp = bp;
-
- osi_QAdd((osi_queue_t **)&buf_freeListp, &bp->q);
- bp->flags |= CM_BUF_INLRU;
- lock_InitializeMutex(&bp->mx, "Buffer mutex");
-
- /* grab appropriate number of bytes from aligned zone */
- bp->datap = data;
-
- /* setup last buffer pointer */
- if (i == 0)
- buf_freeListEndp = bp;
-
- /* next */
- bp++;
- data += buf_bufferSize;
- }
+ /* allocate and zero some storage */
+ memset(bp, 0, sizeof(cm_buf_t));
+ bp->magic = CM_BUF_MAGIC;
+ /* thread on list of all buffers */
+ bp->allp = cm_data.buf_allp;
+ cm_data.buf_allp = bp;
- /* none reserved at first */
- buf_reservedBufs = 0;
+ osi_QAdd((osi_queue_t **)&cm_data.buf_freeListp, &bp->q);
+ bp->flags |= CM_BUF_INLRU;
+ lock_InitializeMutex(&bp->mx, "Buffer mutex");
- /* just for safety's sake */
- buf_maxReservedBufs = buf_nbuffers - 3;
+ /* grab appropriate number of bytes from aligned zone */
+ bp->datap = data;
- /* init the buffer trace log */
- buf_logp = osi_LogCreate("buffer", 10);
+ /* setup last buffer pointer */
+ if (i == 0)
+ cm_data.buf_freeListEndp = bp;
+
+ /* next */
+ bp++;
+ data += cm_data.buf_blockSize;
+ }
+
+ /* none reserved at first */
+ cm_data.buf_reservedBufs = 0;
+
+ /* just for safety's sake */
+ cm_data.buf_maxReservedBufs = cm_data.buf_nbuffers - 3;
+ } else {
+ bp = cm_data.bufHeaderBaseAddress;
+ data = cm_data.bufDataBaseAddress;
+
+ for (i=0; i<cm_data.buf_nbuffers; i++) {
+ lock_InitializeMutex(&bp->mx, "Buffer mutex");
+ bp->userp = NULL;
+ bp->waitCount = 0;
+ bp->waitRequests = 0;
+ bp->flags &= ~CM_BUF_WAITING;
+ bp++;
+ }
+ }
+
+#ifdef TESTING
+ buf_ValidateBufQueues();
+#endif /* TESTING */
+
+#ifdef TRACE_BUFFER
+ /* init the buffer trace log */
+ buf_logp = osi_LogCreate("buffer", 1000);
+ osi_LogEnable(buf_logp);
+#endif
- osi_EndOnce(&once);
-
- /* and create the incr-syncer */
- phandle = thrd_Create(0, 0,
- (ThreadFunc) buf_IncrSyncer, 0, 0, &pid,
- "buf_IncrSyncer");
+ osi_EndOnce(&once);
+
+ /* and create the incr-syncer */
+ phandle = thrd_Create(0, 0,
+ (ThreadFunc) buf_IncrSyncer, 0, 0, &pid,
+ "buf_IncrSyncer");
- osi_assertx(phandle != NULL, "buf: can't create incremental sync proc");
+ osi_assertx(phandle != NULL, "buf: can't create incremental sync proc");
#ifndef DJGPP
- CloseHandle(phandle);
+ CloseHandle(phandle);
#endif /* !DJGPP */
- }
+ }
- return 0;
+#ifdef TESTING
+ buf_ValidateBufQueues();
+#endif /* TESTING */
+ return 0;
}
/* add nbuffers to the buffer pool, if possible.
*/
long buf_AddBuffers(long nbuffers)
{
- cm_buf_t *bp;
- int i;
- char *data;
#ifndef DJGPP
- HANDLE hm;
- long cs;
-
- /*
- * Cache file mapping constrained by
- * system allocation granularity;
- * round up, assuming granularity is a power of two;
- * assume existing cache size is already rounded
- */
- cs = nbuffers * buf_bufferSize;
- cs = (cs + (sysInfo.dwAllocationGranularity - 1))
- & ~(sysInfo.dwAllocationGranularity - 1);
- if (cs != nbuffers * buf_bufferSize) {
- nbuffers = cs / buf_bufferSize;
- }
-
- /* Reserve additional buffer space by remapping cache file */
- hm = CreateFileMapping(CacheHandle,
- NULL,
- PAGE_READWRITE,
- 0, (buf_nbuffers + nbuffers) * buf_bufferSize,
- NULL);
- if (hm == NULL) {
- if (GetLastError() == ERROR_DISK_FULL)
- return CM_ERROR_TOOMANYBUFS;
- else
- return CM_ERROR_INVAL;
- }
- data = MapViewOfFile(hm,
- FILE_MAP_ALL_ACCESS,
- 0, buf_nbuffers * buf_bufferSize,
- nbuffers * buf_bufferSize);
- if (data == NULL) {
- CloseHandle(hm);
- return CM_ERROR_INVAL;
- }
- CloseHandle(hm);
+ /* The size of a virtual cache cannot be changed after it has
+     * been created.  Subsequent calls to MapViewOfFile() with
+     * an existing mapping object name do not allow the
+     * object to be resized.  Return failure immediately.
+ *
+ * A similar problem now occurs with the persistent cache
+ * given that the memory mapped file now contains a complex
+ * data structure.
+ */
+ afsi_log("request to add %d buffers to the existing cache of size %d denied",
+ nbuffers, cm_data.buf_nbuffers);
+
+ return CM_ERROR_INVAL;
#else
- data = malloc(buf_nbuffers * buf_bufferSize);
-#endif /* DJGPP */
+ cm_buf_t *bp;
+ int i;
+ char *data;
+
+    /* allocate space only for the buffers being added, not the whole cache */
+    data = malloc(nbuffers * cm_data.buf_blockSize);
- /* Create buffer headers and put in free list */
- bp = malloc(nbuffers * sizeof(*bp));
+ /* Create buffer headers and put in free list */
+ bp = malloc(nbuffers * sizeof(*bp));
- for(i=0; i<nbuffers; i++) {
- memset(bp, 0, sizeof(*bp));
+ for (i=0; i<nbuffers; i++) {
+ memset(bp, 0, sizeof(*bp));
- lock_InitializeMutex(&bp->mx, "cm_buf_t");
+ lock_InitializeMutex(&bp->mx, "cm_buf_t");
- /* grab appropriate number of bytes from aligned zone */
- bp->datap = data;
+ /* grab appropriate number of bytes from aligned zone */
+ bp->datap = data;
- bp->flags |= CM_BUF_INLRU;
-
- lock_ObtainWrite(&buf_globalLock);
- /* note that buf_allp chain is covered by buf_globalLock now */
- bp->allp = buf_allp;
- buf_allp = bp;
- osi_QAdd((osi_queue_t **) &buf_freeListp, &bp->q);
- if (!buf_freeListEndp) buf_freeListEndp = bp;
- buf_nbuffers++;
- lock_ReleaseWrite(&buf_globalLock);
+ bp->flags |= CM_BUF_INLRU;
+
+ lock_ObtainWrite(&buf_globalLock);
+ /* note that buf_allp chain is covered by buf_globalLock now */
+ bp->allp = cm_data.buf_allp;
+ cm_data.buf_allp = bp;
+ osi_QAdd((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
+ if (!cm_data.buf_freeListEndp)
+ cm_data.buf_freeListEndp = bp;
+ cm_data.buf_nbuffers++;
+ lock_ReleaseWrite(&buf_globalLock);
- bp++;
- data += buf_bufferSize;
+ bp++;
+ data += cm_data.buf_blockSize;
- } /* for loop over all buffers */
+ } /* for loop over all buffers */
- return 0;
-}
+ return 0;
+#endif /* DJGPP */
+}
/* interface to set the number of buffers to an exact figure.
* Called with no locks held.
*/
long buf_SetNBuffers(long nbuffers)
{
- if (nbuffers < 10) return CM_ERROR_INVAL;
- if (nbuffers == buf_nbuffers) return 0;
- else if (nbuffers > buf_nbuffers)
- return buf_AddBuffers(nbuffers - buf_nbuffers);
- else return CM_ERROR_INVAL;
+ if (nbuffers < 10)
+ return CM_ERROR_INVAL;
+ if (nbuffers == cm_data.buf_nbuffers)
+ return 0;
+ else if (nbuffers > cm_data.buf_nbuffers)
+ return buf_AddBuffers(nbuffers - cm_data.buf_nbuffers);
+ else
+ return CM_ERROR_INVAL;
}
/* release a buffer. Buffer must be referenced, but unlocked. */
void buf_Release(cm_buf_t *bp)
{
- lock_ObtainWrite(&buf_globalLock);
- buf_LockedRelease(bp);
- lock_ReleaseWrite(&buf_globalLock);
+ lock_ObtainWrite(&buf_globalLock);
+ buf_LockedRelease(bp);
+ lock_ReleaseWrite(&buf_globalLock);
}
/* wait for reading or writing to clear; called with write-locked
* buffer, and returns with locked buffer.
*/
-void buf_WaitIO(cm_buf_t *bp)
+void buf_WaitIO(cm_scache_t * scp, cm_buf_t *bp)
{
- while (1) {
- /* if no IO is happening, we're done */
- if (!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)))
- break;
+ if (scp)
+ osi_assert(scp->magic == CM_SCACHE_MAGIC);
+ osi_assert(bp->magic == CM_BUF_MAGIC);
+
+ while (1) {
+ /* if no IO is happening, we're done */
+ if (!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)))
+ break;
- /* otherwise I/O is happening, but some other thread is waiting for
- * the I/O already. Wait for that guy to figure out what happened,
- * and then check again.
- */
- bp->flags |= CM_BUF_WAITING;
- osi_SleepM((long) bp, &bp->mx);
- lock_ObtainMutex(&bp->mx);
- osi_Log1(buf_logp, "buf_WaitIO conflict wait done for 0x%x", bp);
- }
-
- /* if we get here, the IO is done, but we may have to wakeup people waiting for
- * the I/O to complete. Do so.
+ /* otherwise I/O is happening, but some other thread is waiting for
+ * the I/O already. Wait for that guy to figure out what happened,
+ * and then check again.
*/
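+        /* waitCount counts the threads blocked here right now and
+         * waitRequests the total waits issued; the last waiter out
+         * (when waitCount reaches zero) clears CM_BUF_WAITING below.
+         */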
- if (bp->flags & CM_BUF_WAITING) {
- bp->flags &= ~CM_BUF_WAITING;
- osi_Wakeup((long) bp);
+ if ( bp->flags & CM_BUF_WAITING ) {
+ bp->waitCount++;
+ bp->waitRequests++;
+ osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING already set for 0x%x", bp);
+ } else {
+ osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING set for 0x%x", bp);
+ bp->flags |= CM_BUF_WAITING;
+ bp->waitCount = bp->waitRequests = 1;
+ }
+ osi_SleepM((long) bp, &bp->mx);
+ lock_ObtainMutex(&bp->mx);
+ osi_Log1(buf_logp, "buf_WaitIO conflict wait done for 0x%x", bp);
+ bp->waitCount--;
+ if (bp->waitCount == 0) {
+            osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING reset for 0x%x", bp);
+ bp->flags &= ~CM_BUF_WAITING;
+ bp->waitRequests = 0;
+ }
+
+ if ( !scp ) {
+ scp = cm_FindSCache(&bp->fid);
}
- osi_Log1(buf_logp, "WaitIO finished wait for bp 0x%x", (long) bp);
+        if ( scp ) {
+            lock_ObtainMutex(&scp->mx);
+            if (scp->flags & CM_SCACHEFLAG_WAITING) {
+                osi_Log1(buf_logp, "buf_WaitIO waking scp 0x%x", scp);
+                osi_Wakeup(&scp->flags);
+            }
+            /* release the mutex whether or not anyone was waiting,
+             * or it would be leaked on the no-waiter path
+             */
+            lock_ReleaseMutex(&scp->mx);
+        }
+ }
+
+ /* if we get here, the IO is done, but we may have to wakeup people waiting for
+ * the I/O to complete. Do so.
+ */
+ if (bp->flags & CM_BUF_WAITING) {
+ osi_Log1(buf_logp, "buf_WaitIO Waking bp 0x%x", bp);
+ osi_Wakeup((long) bp);
+ }
+ osi_Log1(buf_logp, "WaitIO finished wait for bp 0x%x", (long) bp);
}
/* code to drop reference count while holding buf_globalLock */
void buf_LockedRelease(cm_buf_t *bp)
{
- /* ensure that we're in the LRU queue if our ref count is 0 */
- osi_assert(bp->refCount > 0);
- if (--bp->refCount == 0) {
- if (!(bp->flags & CM_BUF_INLRU)) {
- osi_QAdd((osi_queue_t **) &buf_freeListp, &bp->q);
-
- /* watch for transition from empty to one element */
- if (!buf_freeListEndp)
- buf_freeListEndp = buf_freeListp;
- bp->flags |= CM_BUF_INLRU;
- }
+ /* ensure that we're in the LRU queue if our ref count is 0 */
+ osi_assert(bp->refCount > 0);
+ if (--bp->refCount == 0) {
+ if (!(bp->flags & CM_BUF_INLRU)) {
+ osi_QAdd((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
+
+ /* watch for transition from empty to one element */
+ if (!cm_data.buf_freeListEndp)
+ cm_data.buf_freeListEndp = cm_data.buf_freeListp;
+ bp->flags |= CM_BUF_INLRU;
}
-}
+ }
+}
/* find a buffer, if any, for a particular file ID and offset. Assumes
* that buf_globalLock is write locked when called.
*/
cm_buf_t *buf_LockedFind(struct cm_scache *scp, osi_hyper_t *offsetp)
{
- long i;
- cm_buf_t *bp;
-
- i = BUF_HASH(&scp->fid, offsetp);
- for(bp = buf_hashTablepp[i]; bp; bp=bp->hashp) {
- if (cm_FidCmp(&scp->fid, &bp->fid) == 0
- && offsetp->LowPart == bp->offset.LowPart
- && offsetp->HighPart == bp->offset.HighPart) {
- bp->refCount++;
- break;
- }
+ long i;
+ cm_buf_t *bp;
+
+ i = BUF_HASH(&scp->fid, offsetp);
+ for(bp = cm_data.buf_hashTablepp[i]; bp; bp=bp->hashp) {
+ if (cm_FidCmp(&scp->fid, &bp->fid) == 0
+ && offsetp->LowPart == bp->offset.LowPart
+ && offsetp->HighPart == bp->offset.HighPart) {
+ bp->refCount++;
+ break;
}
+ }
- /* return whatever we found, if anything */
- return bp;
+ /* return whatever we found, if anything */
+ return bp;
}
/* find a buffer with offset *offsetp for vnode *scp.  Called without the
 * global buffer lock held; it is obtained and released here.
 */
cm_buf_t *buf_Find(struct cm_scache *scp, osi_hyper_t *offsetp)
{
- cm_buf_t *bp;
+ cm_buf_t *bp;
- lock_ObtainWrite(&buf_globalLock);
- bp = buf_LockedFind(scp, offsetp);
- lock_ReleaseWrite(&buf_globalLock);
+ lock_ObtainWrite(&buf_globalLock);
+ bp = buf_LockedFind(scp, offsetp);
+ lock_ReleaseWrite(&buf_globalLock);
- return bp;
-}
+ return bp;
+}
/* start cleaning I/O on this buffer. Buffer must be write locked, and is returned
* write-locked.
*/
void buf_LockedCleanAsync(cm_buf_t *bp, cm_req_t *reqp)
{
- long code;
+ long code = 0;
- code = 0;
- while ((bp->flags & (CM_BUF_WRITING | CM_BUF_DIRTY)) == CM_BUF_DIRTY) {
- lock_ReleaseMutex(&bp->mx);
+ osi_assert(bp->magic == CM_BUF_MAGIC);
+
+ while ((bp->flags & CM_BUF_DIRTY) == CM_BUF_DIRTY) {
+ lock_ReleaseMutex(&bp->mx);
- code = (*cm_buf_opsp->Writep)(&bp->fid, &bp->offset,
- buf_bufferSize, 0, bp->userp,
- reqp);
+ code = (*cm_buf_opsp->Writep)(&bp->fid, &bp->offset,
+ cm_data.buf_blockSize, 0, bp->userp,
+ reqp);
- lock_ObtainMutex(&bp->mx);
- if (code) break;
+ lock_ObtainMutex(&bp->mx);
+ if (code)
+ break;
#ifdef DISKCACHE95
- /* Disk cache support */
- /* write buffer to disk cache (synchronous for now) */
- diskcache_Update(bp->dcp, bp->datap, buf_bufferSize, bp->dataVersion);
+ /* Disk cache support */
+ /* write buffer to disk cache (synchronous for now) */
+ diskcache_Update(bp->dcp, bp->datap, cm_data.buf_blockSize, bp->dataVersion);
#endif /* DISKCACHE95 */
- };
+    }
- /* do logging after call to GetLastError, or else */
- osi_Log2(buf_logp, "buf_CleanAsync starts I/O on 0x%x, done=%d", bp, code);
+ /* do logging after call to GetLastError, or else */
+ osi_Log2(buf_logp, "buf_CleanAsync starts I/O on 0x%x, done=%d", bp, code);
- /* if someone was waiting for the I/O that just completed or failed,
- * wake them up.
- */
- if (bp->flags & CM_BUF_WAITING) {
- /* turn off flags and wakeup users */
- bp->flags &= ~CM_BUF_WAITING;
- osi_Wakeup((long) bp);
- }
+ /* if someone was waiting for the I/O that just completed or failed,
+ * wake them up.
+ */
+ if (bp->flags & CM_BUF_WAITING) {
+ /* turn off flags and wakeup users */
+        osi_Log1(buf_logp, "buf_LockedCleanAsync Waking bp 0x%x", bp);
+ osi_Wakeup((long) bp);
+ }
}
/* Called with a zero-ref count buffer and with the buf_globalLock write locked.
*/
void buf_Recycle(cm_buf_t *bp)
{
- int i;
- cm_buf_t **lbpp;
- cm_buf_t *tbp;
- cm_buf_t *prevBp, *nextBp;
-
- /* if we get here, we know that the buffer still has a 0 ref count,
- * and that it is clean and has no currently pending I/O. This is
- * the dude to return.
- * Remember that as long as the ref count is 0, we know that we won't
- * have any lock conflicts, so we can grab the buffer lock out of
- * order in the locking hierarchy.
- */
- osi_Log2(buf_logp,
- "buf_Recycle recycles 0x%x, off 0x%x",
- bp, bp->offset.LowPart);
-
- osi_assert(bp->refCount == 0);
- osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY)));
- lock_AssertWrite(&buf_globalLock);
-
- if (bp->flags & CM_BUF_INHASH) {
- /* Remove from hash */
-
- i = BUF_HASH(&bp->fid, &bp->offset);
- lbpp = &(buf_hashTablepp[i]);
- for(tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = *lbpp) {
- if (tbp == bp) break;
- }
-
- /* we better find it */
- osi_assertx(tbp != NULL, "buf_GetNewLocked: hash table screwup");
-
- *lbpp = bp->hashp; /* hash out */
-
- /* Remove from file hash */
-
- i = BUF_FILEHASH(&bp->fid);
- prevBp = bp->fileHashBackp;
- nextBp = bp->fileHashp;
- if (prevBp)
- prevBp->fileHashp = nextBp;
- else
- buf_fileHashTablepp[i] = nextBp;
- if (nextBp)
- nextBp->fileHashBackp = prevBp;
-
- bp->flags &= ~CM_BUF_INHASH;
- }
-
- /* bump the soft reference counter now, to invalidate softRefs; no
- * wakeup is required since people don't sleep waiting for this
- * counter to change.
- */
- bp->idCounter++;
-
- /* make the fid unrecognizable */
- memset(&bp->fid, 0, sizeof(bp->fid));
-}
+ int i;
+ cm_buf_t **lbpp;
+ cm_buf_t *tbp;
+ cm_buf_t *prevBp, *nextBp;
+
+ osi_assert(bp->magic == CM_BUF_MAGIC);
+
+ /* if we get here, we know that the buffer still has a 0 ref count,
+ * and that it is clean and has no currently pending I/O. This is
+ * the dude to return.
+ * Remember that as long as the ref count is 0, we know that we won't
+ * have any lock conflicts, so we can grab the buffer lock out of
+ * order in the locking hierarchy.
+ */
+ osi_Log2( buf_logp, "buf_Recycle recycles 0x%x, off 0x%x",
+ bp, bp->offset.LowPart);
+
+ osi_assert(bp->refCount == 0);
+ osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY)));
+ lock_AssertWrite(&buf_globalLock);
+
+ if (bp->flags & CM_BUF_INHASH) {
+ /* Remove from hash */
+
+ i = BUF_HASH(&bp->fid, &bp->offset);
+ lbpp = &(cm_data.buf_hashTablepp[i]);
+ for(tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = *lbpp) {
+ if (tbp == bp) break;
+ }
+
+ /* we better find it */
+ osi_assertx(tbp != NULL, "buf_Recycle: hash table screwup");
+
+ *lbpp = bp->hashp; /* hash out */
+
+ /* Remove from file hash */
+
+ i = BUF_FILEHASH(&bp->fid);
+ prevBp = bp->fileHashBackp;
+ nextBp = bp->fileHashp;
+ if (prevBp)
+ prevBp->fileHashp = nextBp;
+ else
+ cm_data.buf_fileHashTablepp[i] = nextBp;
+ if (nextBp)
+ nextBp->fileHashBackp = prevBp;
+
+ bp->flags &= ~CM_BUF_INHASH;
+ }
+
+ /* bump the soft reference counter now, to invalidate softRefs; no
+ * wakeup is required since people don't sleep waiting for this
+ * counter to change.
+ */
+ bp->idCounter++;
+
+ /* make the fid unrecognizable */
+ memset(&bp->fid, 0, sizeof(cm_fid_t));
+}
/* recycle a buffer, removing it from the free list, hashing in its new identity
 * and returning it write-locked so that no one can use it.  Called without
 * any locks held.
 */
long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
- cm_buf_t *bp; /* buffer we're dealing with */
- cm_buf_t *nextBp; /* next buffer in file hash chain */
- long i; /* temp */
- cm_req_t req;
-
- cm_InitReq(&req); /* just in case */
-
- while(1) {
-retry:
- lock_ObtainWrite(&buf_globalLock);
- /* check to see if we lost the race */
- if (scp) {
- if (bp = buf_LockedFind(scp, offsetp)) {
- bp->refCount--;
- lock_ReleaseWrite(&buf_globalLock);
- return CM_BUF_EXISTS;
- }
- }
-
- /* for debugging, assert free list isn't empty, although we
- * really should try waiting for a running tranasction to finish
- * instead of this; or better, we should have a transaction
- * throttler prevent us from entering this situation.
- */
- osi_assertx(buf_freeListEndp != NULL, "buf_GetNewLocked: no free buffers");
+ cm_buf_t *bp; /* buffer we're dealing with */
+ cm_buf_t *nextBp; /* next buffer in file hash chain */
+ long i; /* temp */
+ cm_req_t req;
- /* look at all buffers in free list, some of which may temp.
- * have high refcounts and which then should be skipped,
- * starting cleaning I/O for those which are dirty. If we find
- * a clean buffer, we rehash it, lock it and return it.
- */
- for(bp = buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
- /* check to see if it really has zero ref count. This
- * code can bump refcounts, at least, so it may not be
- * zero.
- */
- if (bp->refCount > 0) continue;
-
- /* we don't have to lock buffer itself, since the ref
- * count is 0 and we know it will stay zero as long as
- * we hold the global lock.
- */
-
- /* don't recycle someone in our own chunk */
- if (!cm_FidCmp(&bp->fid, &scp->fid)
- && (bp->offset.LowPart & (-cm_chunkSize))
- == (offsetp->LowPart & (-cm_chunkSize)))
- continue;
-
- /* if this page is being filled (!) or cleaned, see if
- * the I/O has completed. If not, skip it, otherwise
- * do the final processing for the I/O.
- */
- if (bp->flags & (CM_BUF_READING | CM_BUF_WRITING)) {
- /* probably shouldn't do this much work while
- * holding the big lock? Watch for contention
- * here.
- */
- continue;
- }
-
- if (bp->flags & CM_BUF_DIRTY) {
- /* if the buffer is dirty, start cleaning it and
- * move on to the next buffer. We do this with
- * just the lock required to minimize contention
- * on the big lock.
- */
- bp->refCount++;
- lock_ReleaseWrite(&buf_globalLock);
-
- /* grab required lock and clean; this only
- * starts the I/O. By the time we're back,
- * it'll still be marked dirty, but it will also
- * have the WRITING flag set, so we won't get
- * back here.
- */
- buf_CleanAsync(bp, &req);
-
- /* now put it back and go around again */
- buf_Release(bp);
- goto retry;
- }
-
- /* if we get here, we know that the buffer still has a 0
- * ref count, and that it is clean and has no currently
- * pending I/O. This is the dude to return.
- * Remember that as long as the ref count is 0, we know
- * that we won't have any lock conflicts, so we can grab
- * the buffer lock out of order in the locking hierarchy.
- */
- buf_Recycle(bp);
-
- /* clean up junk flags */
- bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR);
- bp->dataVersion = -1; /* unknown so far */
-
- /* now hash in as our new buffer, and give it the
- * appropriate label, if requested.
- */
- if (scp) {
- bp->flags |= CM_BUF_INHASH;
- bp->fid = scp->fid;
- bp->offset = *offsetp;
- i = BUF_HASH(&scp->fid, offsetp);
- bp->hashp = buf_hashTablepp[i];
- buf_hashTablepp[i] = bp;
- i = BUF_FILEHASH(&scp->fid);
- nextBp = buf_fileHashTablepp[i];
- bp->fileHashp = nextBp;
- bp->fileHashBackp = NULL;
- if (nextBp)
- nextBp->fileHashBackp = bp;
- buf_fileHashTablepp[i] = bp;
- }
+ cm_InitReq(&req); /* just in case */
+
+#ifdef TESTING
+ buf_ValidateBufQueues();
+#endif /* TESTING */
+
+ while(1) {
+ retry:
+ lock_ObtainWrite(&buf_globalLock);
+ /* check to see if we lost the race */
+ if (scp) {
+ if (bp = buf_LockedFind(scp, offsetp)) {
+ bp->refCount--;
+ lock_ReleaseWrite(&buf_globalLock);
+ return CM_BUF_EXISTS;
+ }
+ }
+
+        /* if the free list is empty, drop the lock and give other threads
+         * a chance to release buffers before retrying, rather than
+         * tripping the assertion below; a transaction throttler would be
+         * a cleaner long-term fix.
+         */
+ if (!cm_data.buf_freeListEndp)
+ {
+ lock_ReleaseWrite(&buf_globalLock);
+ Sleep(200);
+ goto retry;
+ }
+
+ /* for debugging, assert free list isn't empty, although we
+ * really should try waiting for a running tranasction to finish
+ * instead of this; or better, we should have a transaction
+ * throttler prevent us from entering this situation.
+ */
+ osi_assertx(cm_data.buf_freeListEndp != NULL, "buf_GetNewLocked: no free buffers");
+
+ /* look at all buffers in free list, some of which may temp.
+ * have high refcounts and which then should be skipped,
+ * starting cleaning I/O for those which are dirty. If we find
+ * a clean buffer, we rehash it, lock it and return it.
+ */
+ for(bp = cm_data.buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
+ /* check to see if it really has zero ref count. This
+ * code can bump refcounts, at least, so it may not be
+ * zero.
+ */
+ if (bp->refCount > 0)
+ continue;
- /* prepare to return it. Start by giving it a good
- * refcount */
- bp->refCount = 1;
+ /* we don't have to lock buffer itself, since the ref
+ * count is 0 and we know it will stay zero as long as
+ * we hold the global lock.
+ */
+
+ /* don't recycle someone in our own chunk */
+ if (!cm_FidCmp(&bp->fid, &scp->fid)
+ && (bp->offset.LowPart & (-cm_chunkSize))
+ == (offsetp->LowPart & (-cm_chunkSize)))
+ continue;
+
+ /* if this page is being filled (!) or cleaned, see if
+ * the I/O has completed. If not, skip it, otherwise
+ * do the final processing for the I/O.
+ */
+ if (bp->flags & (CM_BUF_READING | CM_BUF_WRITING)) {
+ /* probably shouldn't do this much work while
+ * holding the big lock? Watch for contention
+ * here.
+ */
+ continue;
+ }
- /* and since it has a non-zero ref count, we should move
- * it from the lru queue. It better be still there,
- * since we've held the global (big) lock since we found
- * it there.
- */
- osi_assertx(bp->flags & CM_BUF_INLRU,
- "buf_GetNewLocked: LRU screwup");
- if (buf_freeListEndp == bp) {
- /* we're the last guy in this queue, so maintain it */
- buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
- }
- osi_QRemove((osi_queue_t **) &buf_freeListp, &bp->q);
- bp->flags &= ~CM_BUF_INLRU;
+ if (bp->flags & CM_BUF_DIRTY) {
+ /* if the buffer is dirty, start cleaning it and
+ * move on to the next buffer. We do this with
+ * just the lock required to minimize contention
+ * on the big lock.
+ */
+ bp->refCount++;
+ lock_ReleaseWrite(&buf_globalLock);
+
+ /* grab required lock and clean; this only
+ * starts the I/O. By the time we're back,
+ * it'll still be marked dirty, but it will also
+ * have the WRITING flag set, so we won't get
+ * back here.
+ */
+ buf_CleanAsync(bp, &req);
+
+ /* now put it back and go around again */
+ buf_Release(bp);
+ goto retry;
+ }
+
+ /* if we get here, we know that the buffer still has a 0
+ * ref count, and that it is clean and has no currently
+ * pending I/O. This is the dude to return.
+ * Remember that as long as the ref count is 0, we know
+ * that we won't have any lock conflicts, so we can grab
+ * the buffer lock out of order in the locking hierarchy.
+ */
+ buf_Recycle(bp);
+
+ /* clean up junk flags */
+ bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR);
+ bp->dataVersion = -1; /* unknown so far */
+
+ /* now hash in as our new buffer, and give it the
+ * appropriate label, if requested.
+ */
+ if (scp) {
+ bp->flags |= CM_BUF_INHASH;
+ bp->fid = scp->fid;
+ bp->offset = *offsetp;
+ i = BUF_HASH(&scp->fid, offsetp);
+ bp->hashp = cm_data.buf_hashTablepp[i];
+ cm_data.buf_hashTablepp[i] = bp;
+ i = BUF_FILEHASH(&scp->fid);
+ nextBp = cm_data.buf_fileHashTablepp[i];
+ bp->fileHashp = nextBp;
+ bp->fileHashBackp = NULL;
+ if (nextBp)
+ nextBp->fileHashBackp = bp;
+ cm_data.buf_fileHashTablepp[i] = bp;
+ }
+
+ /* prepare to return it. Start by giving it a good
+ * refcount */
+ bp->refCount = 1;
- /* finally, grab the mutex so that people don't use it
- * before the caller fills it with data. Again, no one
- * should have been able to get to this dude to lock it.
- */
- osi_assertx(lock_TryMutex(&bp->mx),
- "buf_GetNewLocked: TryMutex failed");
-
- lock_ReleaseWrite(&buf_globalLock);
- *bufpp = bp;
- return 0;
- } /* for all buffers in lru queue */
- lock_ReleaseWrite(&buf_globalLock);
- } /* while loop over everything */
- /* not reached */
+ /* and since it has a non-zero ref count, we should move
+ * it from the lru queue. It better be still there,
+ * since we've held the global (big) lock since we found
+ * it there.
+ */
+ osi_assertx(bp->flags & CM_BUF_INLRU,
+ "buf_GetNewLocked: LRU screwup");
+ if (cm_data.buf_freeListEndp == bp) {
+ /* we're the last guy in this queue, so maintain it */
+ cm_data.buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
+ }
+ osi_QRemove((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
+ bp->flags &= ~CM_BUF_INLRU;
+
+ /* finally, grab the mutex so that people don't use it
+ * before the caller fills it with data. Again, no one
+ * should have been able to get to this dude to lock it.
+ */
+ osi_assertx(lock_TryMutex(&bp->mx),
+ "buf_GetNewLocked: TryMutex failed");
+
+ lock_ReleaseWrite(&buf_globalLock);
+ *bufpp = bp;
+
+#ifdef TESTING
+ buf_ValidateBufQueues();
+#endif /* TESTING */
+ return 0;
+ } /* for all buffers in lru queue */
+ lock_ReleaseWrite(&buf_globalLock);
+ } /* while loop over everything */
+ /* not reached */
} /* the proc */
/* get a page, returning it held but unlocked.  Unlike buf_Get, this does
 * not fill the page with data from the file server.
 */
long buf_GetNew(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
- cm_buf_t *bp;
- long code;
- osi_hyper_t pageOffset;
- int created;
-
- created = 0;
- pageOffset.HighPart = offsetp->HighPart;
- pageOffset.LowPart = offsetp->LowPart & ~(buf_bufferSize-1);
- while (1) {
- lock_ObtainWrite(&buf_globalLock);
- bp = buf_LockedFind(scp, &pageOffset);
- lock_ReleaseWrite(&buf_globalLock);
- if (bp) {
- /* lock it and break out */
- lock_ObtainMutex(&bp->mx);
- break;
- }
-
- /* otherwise, we have to create a page */
- code = buf_GetNewLocked(scp, &pageOffset, &bp);
+ cm_buf_t *bp;
+ long code;
+ osi_hyper_t pageOffset;
+ int created;
+
+ created = 0;
+ pageOffset.HighPart = offsetp->HighPart;
+ pageOffset.LowPart = offsetp->LowPart & ~(cm_data.buf_blockSize-1);
+ while (1) {
+ bp = buf_Find(scp, &pageOffset);
+ if (bp) {
+ /* lock it and break out */
+ lock_ObtainMutex(&bp->mx);
+ break;
+ }
- /* check if the buffer was created in a race condition branch.
- * If so, go around so we can hold a reference to it.
- */
- if (code == CM_BUF_EXISTS) continue;
-
- /* something else went wrong */
- if (code != 0) return code;
-
- /* otherwise, we have a locked buffer that we just created */
- created = 1;
- break;
- } /* big while loop */
-
- /* wait for reads */
- if (bp->flags & CM_BUF_READING)
- buf_WaitIO(bp);
+ /* otherwise, we have to create a page */
+ code = buf_GetNewLocked(scp, &pageOffset, &bp);
- /* once it has been read once, we can unlock it and return it, still
- * with its refcount held.
+ /* check if the buffer was created in a race condition branch.
+ * If so, go around so we can hold a reference to it.
*/
- lock_ReleaseMutex(&bp->mx);
- *bufpp = bp;
- osi_Log3(buf_logp, "buf_GetNew returning bp 0x%x for file 0x%x, offset 0x%x",
- bp, (long) scp, offsetp->LowPart);
- return 0;
+ if (code == CM_BUF_EXISTS)
+ continue;
+
+ /* something else went wrong */
+ if (code != 0)
+ return code;
+
+ /* otherwise, we have a locked buffer that we just created */
+ created = 1;
+ break;
+ } /* big while loop */
+
+ /* wait for reads */
+ if (bp->flags & CM_BUF_READING)
+ buf_WaitIO(scp, bp);
+
+ /* once it has been read once, we can unlock it and return it, still
+ * with its refcount held.
+ */
+ lock_ReleaseMutex(&bp->mx);
+ *bufpp = bp;
+ osi_Log3(buf_logp, "buf_GetNew returning bp 0x%x for file 0x%x, offset 0x%x",
+ bp, (long) scp, offsetp->LowPart);
+ return 0;
}
/* get a page, returning it held but unlocked. Make sure it is complete */
+/* The scp must be unlocked when passed to this function */
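+/* (buf_WaitIO may obtain scp->mx in order to wake threads waiting on the scache) */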
long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
- cm_buf_t *bp;
- long code;
- osi_hyper_t pageOffset;
- unsigned long tcount;
- int created;
+ cm_buf_t *bp;
+ long code;
+ osi_hyper_t pageOffset;
+ unsigned long tcount;
+ int created;
+    long lcount = 0;              /* loop iterations, for debugging */
#ifdef DISKCACHE95
- cm_diskcache_t *dcp;
+ cm_diskcache_t *dcp;
#endif /* DISKCACHE95 */
- created = 0;
- pageOffset.HighPart = offsetp->HighPart;
- pageOffset.LowPart = offsetp->LowPart & ~(buf_bufferSize-1);
- while (1) {
- lock_ObtainWrite(&buf_globalLock);
- bp = buf_LockedFind(scp, &pageOffset);
- lock_ReleaseWrite(&buf_globalLock);
- if (bp) {
- /* lock it and break out */
- lock_ObtainMutex(&bp->mx);
- break;
+ created = 0;
+ pageOffset.HighPart = offsetp->HighPart;
+ pageOffset.LowPart = offsetp->LowPart & ~(cm_data.buf_blockSize-1);
+ while (1) {
+ lcount++;
+#ifdef TESTING
+ buf_ValidateBufQueues();
+#endif /* TESTING */
+
+ bp = buf_Find(scp, &pageOffset);
+ if (bp) {
+ /* lock it and break out */
+            lock_ObtainMutex(&bp->mx);
#ifdef DISKCACHE95
-            /* touch disk chunk to update LRU info */
-            diskcache_Touch(bp->dcp);
+            /* touch disk chunk to update LRU info; this must precede
+             * the break or the call is unreachable
+             */
+            diskcache_Touch(bp->dcp);
#endif /* DISKCACHE95 */
+            break;
- }
-
- /* otherwise, we have to create a page */
- code = buf_GetNewLocked(scp, &pageOffset, &bp);
+ }
- /* check if the buffer was created in a race condition branch.
- * If so, go around so we can hold a reference to it.
- */
- if (code == CM_BUF_EXISTS) continue;
-
- /* something else went wrong */
- if (code != 0) return code;
-
- /* otherwise, we have a locked buffer that we just created */
- created = 1;
- break;
- } /* big while loop */
-
- /* if we get here, we have a locked buffer that may have just been
- * created, in which case it needs to be filled with data.
- */
- if (created) {
- /* load the page; freshly created pages should be idle */
- osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)));
+ /* otherwise, we have to create a page */
+ code = buf_GetNewLocked(scp, &pageOffset, &bp);
- /* setup offset, event */
+ /* check if the buffer was created in a race condition branch.
+ * If so, go around so we can hold a reference to it.
+ */
+ if (code == CM_BUF_EXISTS)
+ continue;
+
+ /* something else went wrong */
+ if (code != 0) {
+#ifdef TESTING
+ buf_ValidateBufQueues();
+#endif /* TESTING */
+ return code;
+ }
+
+ /* otherwise, we have a locked buffer that we just created */
+ created = 1;
+ break;
+ } /* big while loop */
+
+ /* if we get here, we have a locked buffer that may have just been
+ * created, in which case it needs to be filled with data.
+ */
+ if (created) {
+ /* load the page; freshly created pages should be idle */
+ osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)));
+
+ /* setup offset, event */
#ifndef DJGPP /* doesn't seem to be used */
- bp->over.Offset = bp->offset.LowPart;
- bp->over.OffsetHigh = bp->offset.HighPart;
+ bp->over.Offset = bp->offset.LowPart;
+ bp->over.OffsetHigh = bp->offset.HighPart;
#endif /* !DJGPP */
- /* start the I/O; may drop lock */
- bp->flags |= CM_BUF_READING;
- code = (*cm_buf_opsp->Readp)(bp, buf_bufferSize, &tcount, NULL);
+ /* start the I/O; may drop lock */
+ bp->flags |= CM_BUF_READING;
+ code = (*cm_buf_opsp->Readp)(bp, cm_data.buf_blockSize, &tcount, NULL);
#ifdef DISKCACHE95
- code = diskcache_Get(&bp->fid, &bp->offset, bp->datap, buf_bufferSize, &bp->dataVersion, &tcount, &dcp);
- bp->dcp = dcp; /* pointer to disk cache struct. */
+ code = diskcache_Get(&bp->fid, &bp->offset, bp->datap, cm_data.buf_blockSize, &bp->dataVersion, &tcount, &dcp);
+ bp->dcp = dcp; /* pointer to disk cache struct. */
#endif /* DISKCACHE95 */
- if (code != 0) {
- /* failure or queued */
+ if (code != 0) {
+ /* failure or queued */
#ifndef DJGPP /* cm_bufRead always returns 0 */
- if (code != ERROR_IO_PENDING) {
+ if (code != ERROR_IO_PENDING) {
#endif
- bp->error = code;
- bp->flags |= CM_BUF_ERROR;
- bp->flags &= ~CM_BUF_READING;
- if (bp->flags & CM_BUF_WAITING) {
- bp->flags &= ~CM_BUF_WAITING;
- osi_Wakeup((long) bp);
- }
- lock_ReleaseMutex(&bp->mx);
- buf_Release(bp);
- return code;
+ bp->error = code;
+ bp->flags |= CM_BUF_ERROR;
+ bp->flags &= ~CM_BUF_READING;
+ if (bp->flags & CM_BUF_WAITING) {
+ osi_Log1(buf_logp, "buf_Get Waking bp 0x%x", bp);
+ osi_Wakeup((long) bp);
+ }
+ lock_ReleaseMutex(&bp->mx);
+ buf_Release(bp);
+#ifdef TESTING
+ buf_ValidateBufQueues();
+#endif /* TESTING */
+ return code;
#ifndef DJGPP
- }
+ }
#endif
- } else {
- /* otherwise, I/O completed instantly and we're done, except
- * for padding the xfr out with 0s and checking for EOF
- */
- if (tcount < (unsigned long) buf_bufferSize) {
- memset(bp->datap+tcount, 0, buf_bufferSize - tcount);
- if (tcount == 0)
- bp->flags |= CM_BUF_EOF;
- }
- bp->flags &= ~CM_BUF_READING;
- if (bp->flags & CM_BUF_WAITING) {
- bp->flags &= ~CM_BUF_WAITING;
- osi_Wakeup((long) bp);
- }
- }
-
- } /* if created */
-
- /* wait for reads, either that which we started above, or that someone
- * else started. We don't care if we return a buffer being cleaned.
- */
- if (bp->flags & CM_BUF_READING)
- buf_WaitIO(bp);
-
- /* once it has been read once, we can unlock it and return it, still
- * with its refcount held.
- */
- lock_ReleaseMutex(&bp->mx);
- *bufpp = bp;
-
- /* now remove from queue; will be put in at the head (farthest from
- * being recycled) when we're done in buf_Release.
- */
- lock_ObtainWrite(&buf_globalLock);
- if (bp->flags & CM_BUF_INLRU) {
- if (buf_freeListEndp == bp)
- buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
- osi_QRemove((osi_queue_t **) &buf_freeListp, &bp->q);
- bp->flags &= ~CM_BUF_INLRU;
+ } else {
+ /* otherwise, I/O completed instantly and we're done, except
+ * for padding the xfr out with 0s and checking for EOF
+ */
+ if (tcount < (unsigned long) cm_data.buf_blockSize) {
+ memset(bp->datap+tcount, 0, cm_data.buf_blockSize - tcount);
+ if (tcount == 0)
+ bp->flags |= CM_BUF_EOF;
+ }
+ bp->flags &= ~CM_BUF_READING;
+ if (bp->flags & CM_BUF_WAITING) {
+ osi_Log1(buf_logp, "buf_Get Waking bp 0x%x", bp);
+ osi_Wakeup((long) bp);
+ }
}
- lock_ReleaseWrite(&buf_globalLock);
- osi_Log3(buf_logp, "buf_Get returning bp 0x%x for file 0x%x, offset 0x%x",
- bp, (long) scp, offsetp->LowPart);
- return 0;
+ } /* if created */
+
+ /* wait for reads, either that which we started above, or that someone
+ * else started. We don't care if we return a buffer being cleaned.
+ */
+ if (bp->flags & CM_BUF_READING)
+ buf_WaitIO(scp, bp);
+
+ /* once it has been read once, we can unlock it and return it, still
+ * with its refcount held.
+ */
+ lock_ReleaseMutex(&bp->mx);
+ *bufpp = bp;
+
+ /* now remove from queue; will be put in at the head (farthest from
+ * being recycled) when we're done in buf_Release.
+ */
+ lock_ObtainWrite(&buf_globalLock);
+ if (bp->flags & CM_BUF_INLRU) {
+ if (cm_data.buf_freeListEndp == bp)
+ cm_data.buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
+ osi_QRemove((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
+ bp->flags &= ~CM_BUF_INLRU;
+ }
+ lock_ReleaseWrite(&buf_globalLock);
+
+ osi_Log3(buf_logp, "buf_Get returning bp 0x%x for file 0x%x, offset 0x%x",
+ bp, (long) scp, offsetp->LowPart);
+#ifdef TESTING
+ buf_ValidateBufQueues();
+#endif /* TESTING */
+ return 0;
}
/* count the number of free (unused) buffers on the free list; a buffer
 * with no identity, or one whose data version has been stomped, counts
 * as free.
 */
long buf_CountFreeList(void)
{
- long count;
- cm_buf_t *bufp;
-
- count = 0;
- lock_ObtainRead(&buf_globalLock);
- for(bufp = buf_freeListp; bufp; bufp = (cm_buf_t *) osi_QNext(&bufp->q)) {
- /* if the buffer doesn't have an identity, or if the buffer
- * has been invalidate (by having its DV stomped upon), then
- * count it as free, since it isn't really being utilized.
- */
- if (!(bufp->flags & CM_BUF_INHASH) || bufp->dataVersion <= 0)
- count++;
- }
- lock_ReleaseRead(&buf_globalLock);
- return count;
+ long count;
+ cm_buf_t *bufp;
+
+ count = 0;
+ lock_ObtainRead(&buf_globalLock);
+ for(bufp = cm_data.buf_freeListp; bufp; bufp = (cm_buf_t *) osi_QNext(&bufp->q)) {
+ /* if the buffer doesn't have an identity, or if the buffer
+         * has been invalidated (by having its DV stomped upon), then
+ * count it as free, since it isn't really being utilized.
+ */
+ if (!(bufp->flags & CM_BUF_INHASH) || bufp->dataVersion <= 0)
+ count++;
+ }
+ lock_ReleaseRead(&buf_globalLock);
+ return count;
}
/* clean a buffer synchronously */
void buf_CleanAsync(cm_buf_t *bp, cm_req_t *reqp)
{
- lock_ObtainMutex(&bp->mx);
- buf_LockedCleanAsync(bp, reqp);
- lock_ReleaseMutex(&bp->mx);
-}
+ osi_assert(bp->magic == CM_BUF_MAGIC);
+
+ lock_ObtainMutex(&bp->mx);
+ buf_LockedCleanAsync(bp, reqp);
+ lock_ReleaseMutex(&bp->mx);
+}
/* wait for a buffer's cleaning to finish */
-void buf_CleanWait(cm_buf_t *bp)
+void buf_CleanWait(cm_scache_t * scp, cm_buf_t *bp)
{
- lock_ObtainMutex(&bp->mx);
- if (bp->flags & CM_BUF_WRITING) {
- buf_WaitIO(bp);
- }
- lock_ReleaseMutex(&bp->mx);
-}
+ osi_assert(bp->magic == CM_BUF_MAGIC);
+
+ lock_ObtainMutex(&bp->mx);
+ if (bp->flags & CM_BUF_WRITING) {
+ buf_WaitIO(scp, bp);
+ }
+ lock_ReleaseMutex(&bp->mx);
+}
/* set the dirty flag on a buffer, and set associated write-ahead log,
* if there is one. Allow one to be added to a buffer, but not changed.
*/
void buf_SetDirty(cm_buf_t *bp)
{
- osi_assert(bp->refCount > 0);
+ osi_assert(bp->magic == CM_BUF_MAGIC);
+ osi_assert(bp->refCount > 0);
- osi_Log1(buf_logp, "buf_SetDirty 0x%x", bp);
+ osi_Log1(buf_logp, "buf_SetDirty 0x%x", bp);
- /* set dirty bit */
- bp->flags |= CM_BUF_DIRTY;
+ /* set dirty bit */
+ bp->flags |= CM_BUF_DIRTY;
- /* and turn off EOF flag, since it has associated data now */
- bp->flags &= ~CM_BUF_EOF;
+ /* and turn off EOF flag, since it has associated data now */
+ bp->flags &= ~CM_BUF_EOF;
}
/* clean all buffers, reset log pointers and invalidate all buffers.
*/
long buf_CleanAndReset(void)
{
- long i;
- cm_buf_t *bp;
- cm_req_t req;
-
- lock_ObtainWrite(&buf_globalLock);
- for(i=0; i<buf_hashSize; i++) {
- for(bp = buf_hashTablepp[i]; bp; bp = bp->hashp) {
- bp->refCount++;
- lock_ReleaseWrite(&buf_globalLock);
-
- /* now no locks are held; clean buffer and go on */
- cm_InitReq(&req);
- buf_CleanAsync(bp, &req);
- buf_CleanWait(bp);
-
- /* relock and release buffer */
- lock_ObtainWrite(&buf_globalLock);
- buf_LockedRelease(bp);
- } /* over one bucket */
- } /* for loop over all hash buckets */
-
- /* release locks */
- lock_ReleaseWrite(&buf_globalLock);
+ long i;
+ cm_buf_t *bp;
+ cm_req_t req;
- /* and we're done */
- return 0;
-}
+ lock_ObtainWrite(&buf_globalLock);
+ for(i=0; i<cm_data.buf_hashSize; i++) {
+ for(bp = cm_data.buf_hashTablepp[i]; bp; bp = bp->hashp) {
+ if ((bp->flags & CM_BUF_DIRTY) == CM_BUF_DIRTY) {
+ bp->refCount++;
+ lock_ReleaseWrite(&buf_globalLock);
+
+ /* now no locks are held; clean buffer and go on */
+ cm_InitReq(&req);
+ buf_CleanAsync(bp, &req);
+ buf_CleanWait(NULL, bp);
+
+ /* relock and release buffer */
+ lock_ObtainWrite(&buf_globalLock);
+ buf_LockedRelease(bp);
+ } /* dirty */
+ } /* over one bucket */
+ } /* for loop over all hash buckets */
+
+ /* release locks */
+ lock_ReleaseWrite(&buf_globalLock);
+
+#ifdef TESTING
+ buf_ValidateBufQueues();
+#endif /* TESTING */
+
+ /* and we're done */
+ return 0;
+}
/* called without global lock being held, reserves buffers for callers
* that need more than one held (not locked) at once.
*/
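+/* reservations are capped at buf_maxReservedBufs (buf_nbuffers - 3, set in
+ * buf_Init) so a few buffers always remain unreservable; each reservation
+ * is returned later via buf_UnreserveBuffers
+ */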
void buf_ReserveBuffers(long nbuffers)
{
- lock_ObtainWrite(&buf_globalLock);
- while (1) {
- if (buf_reservedBufs + nbuffers > buf_maxReservedBufs) {
- buf_reserveWaiting = 1;
- osi_Log1(buf_logp, "buf_ReserveBuffers waiting for %d bufs", nbuffers);
- osi_SleepW((long) &buf_reservedBufs, &buf_globalLock);
- lock_ObtainWrite(&buf_globalLock);
- }
- else {
- buf_reservedBufs += nbuffers;
- break;
- }
+ lock_ObtainWrite(&buf_globalLock);
+ while (1) {
+ if (cm_data.buf_reservedBufs + nbuffers > cm_data.buf_maxReservedBufs) {
+ cm_data.buf_reserveWaiting = 1;
+ osi_Log1(buf_logp, "buf_ReserveBuffers waiting for %d bufs", nbuffers);
+ osi_SleepW((long) &cm_data.buf_reservedBufs, &buf_globalLock);
+ lock_ObtainWrite(&buf_globalLock);
+ }
+ else {
+ cm_data.buf_reservedBufs += nbuffers;
+ break;
}
- lock_ReleaseWrite(&buf_globalLock);
+ }
+ lock_ReleaseWrite(&buf_globalLock);
}
int buf_TryReserveBuffers(long nbuffers)
{
- int code;
-
- lock_ObtainWrite(&buf_globalLock);
- if (buf_reservedBufs + nbuffers > buf_maxReservedBufs) {
- code = 0;
- }
- else {
- buf_reservedBufs += nbuffers;
- code = 1;
- }
- lock_ReleaseWrite(&buf_globalLock);
- return code;
-}
+ int code;
+
+ lock_ObtainWrite(&buf_globalLock);
+ if (cm_data.buf_reservedBufs + nbuffers > cm_data.buf_maxReservedBufs) {
+ code = 0;
+ }
+ else {
+ cm_data.buf_reservedBufs += nbuffers;
+ code = 1;
+ }
+ lock_ReleaseWrite(&buf_globalLock);
+ return code;
+}
/* called without global lock held, releases reservation held by
* buf_ReserveBuffers.
*/
void buf_UnreserveBuffers(long nbuffers)
{
- lock_ObtainWrite(&buf_globalLock);
- buf_reservedBufs -= nbuffers;
- if (buf_reserveWaiting) {
- buf_reserveWaiting = 0;
- osi_Wakeup((long) &buf_reservedBufs);
- }
- lock_ReleaseWrite(&buf_globalLock);
-}
+ lock_ObtainWrite(&buf_globalLock);
+ cm_data.buf_reservedBufs -= nbuffers;
+ if (cm_data.buf_reserveWaiting) {
+ cm_data.buf_reserveWaiting = 0;
+ osi_Wakeup((long) &cm_data.buf_reservedBufs);
+ }
+ lock_ReleaseWrite(&buf_globalLock);
+}
/* truncate the buffers past sizep, zeroing out the page, if we don't
* end on a page boundary.
* Requires cm_bufCreateLock to be write locked.
*/
long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
- osi_hyper_t *sizep)
+ osi_hyper_t *sizep)
{
- cm_buf_t *bufp;
- cm_buf_t *nbufp; /* next buffer, if didRelease */
- osi_hyper_t bufEnd;
- long code;
- long bufferPos;
- int didRelease;
- long i;
-
- /* assert that cm_bufCreateLock is held in write mode */
- lock_AssertWrite(&scp->bufCreateLock);
-
- i = BUF_FILEHASH(&scp->fid);
-
- lock_ObtainWrite(&buf_globalLock);
- bufp = buf_fileHashTablepp[i];
- if (bufp == NULL) {
- lock_ReleaseWrite(&buf_globalLock);
- return 0;
- }
-
- bufp->refCount++;
- lock_ReleaseWrite(&buf_globalLock);
- for(; bufp; bufp = nbufp) {
- didRelease = 0;
- lock_ObtainMutex(&bufp->mx);
-
- bufEnd.HighPart = 0;
- bufEnd.LowPart = buf_bufferSize;
- bufEnd = LargeIntegerAdd(bufEnd, bufp->offset);
-
- if (cm_FidCmp(&bufp->fid, &scp->fid) == 0 &&
- LargeIntegerLessThan(*sizep, bufEnd)) {
- buf_WaitIO(bufp);
- }
- lock_ObtainMutex(&scp->mx);
+ cm_buf_t *bufp;
+ cm_buf_t *nbufp; /* next buffer, if didRelease */
+ osi_hyper_t bufEnd;
+ long code;
+ long bufferPos;
+ int didRelease;
+ long i;
+
+ /* assert that cm_bufCreateLock is held in write mode */
+ lock_AssertWrite(&scp->bufCreateLock);
+
+ i = BUF_FILEHASH(&scp->fid);
+
+ lock_ObtainWrite(&buf_globalLock);
+ bufp = cm_data.buf_fileHashTablepp[i];
+ if (bufp == NULL) {
+ lock_ReleaseWrite(&buf_globalLock);
+ return 0;
+ }
+
+ bufp->refCount++;
+ lock_ReleaseWrite(&buf_globalLock);
+ for(; bufp; bufp = nbufp) {
+ didRelease = 0;
+ lock_ObtainMutex(&bufp->mx);
+
+ bufEnd.HighPart = 0;
+ bufEnd.LowPart = cm_data.buf_blockSize;
+ bufEnd = LargeIntegerAdd(bufEnd, bufp->offset);
+
+ if (cm_FidCmp(&bufp->fid, &scp->fid) == 0 &&
+ LargeIntegerLessThan(*sizep, bufEnd)) {
+ buf_WaitIO(scp, bufp);
+ }
+ lock_ObtainMutex(&scp->mx);
- /* make sure we have a callback (so we have the right value for
- * the length), and wait for it to be safe to do a truncate.
- */
- code = cm_SyncOp(scp, bufp, userp, reqp, 0,
- CM_SCACHESYNC_NEEDCALLBACK
- | CM_SCACHESYNC_GETSTATUS
- | CM_SCACHESYNC_SETSIZE
- | CM_SCACHESYNC_BUFLOCKED);
- /* if we succeeded in our locking, and this applies to the right
- * file, and the truncate request overlaps the buffer either
- * totally or partially, then do something.
+ /* make sure we have a callback (so we have the right value for
+ * the length), and wait for it to be safe to do a truncate.
+ */
+ code = cm_SyncOp(scp, bufp, userp, reqp, 0,
+ CM_SCACHESYNC_NEEDCALLBACK
+ | CM_SCACHESYNC_GETSTATUS
+ | CM_SCACHESYNC_SETSIZE
+ | CM_SCACHESYNC_BUFLOCKED);
+ /* if we succeeded in our locking, and this applies to the right
+ * file, and the truncate request overlaps the buffer either
+ * totally or partially, then do something.
+ */
+ if (code == 0 && cm_FidCmp(&bufp->fid, &scp->fid) == 0
+ && LargeIntegerLessThan(*sizep, bufEnd)) {
+
+ lock_ObtainWrite(&buf_globalLock);
+
+ /* destroy the buffer, turning off its dirty bit, if
+ * we're truncating the whole buffer. Otherwise, set
+ * the dirty bit, and clear out the tail of the buffer
+ * if we just overlap some.
+ */
+ if (LargeIntegerLessThanOrEqualTo(*sizep, bufp->offset)) {
+ /* truncating the entire page */
+ bufp->flags &= ~CM_BUF_DIRTY;
+ bufp->dataVersion = -1; /* known bad */
+ bufp->dirtyCounter++;
+ }
+        else {
+            /* don't set the dirty bit, since dirty implies
+             * currently up-to-date; no store is needed here,
+             * because the file length is updated separately.
+             *
+             * Zero out the remainder of the page, so that if
+             * a later seek and write past EOF makes this data
+             * visible again, it reads back as zeroes.
+             */
- if (code == 0 && cm_FidCmp(&bufp->fid, &scp->fid) == 0
- && LargeIntegerLessThan(*sizep, bufEnd)) {
-
- lock_ObtainWrite(&buf_globalLock);
-
- /* destroy the buffer, turning off its dirty bit, if
- * we're truncating the whole buffer. Otherwise, set
- * the dirty bit, and clear out the tail of the buffer
- * if we just overlap some.
- */
- if (LargeIntegerLessThanOrEqualTo(*sizep, bufp->offset)) {
- /* truncating the entire page */
- bufp->flags &= ~CM_BUF_DIRTY;
- bufp->dataVersion = -1; /* known bad */
- bufp->dirtyCounter++;
- }
- else {
- /* don't set dirty, since dirty implies
- * currently up-to-date. Don't need to do this,
- * since we'll update the length anyway.
- *
- * Zero out remainder of the page, in case we
- * seek and write past EOF, and make this data
- * visible again.
- */
- bufferPos = sizep->LowPart & (buf_bufferSize - 1);
- osi_assert(bufferPos != 0);
- memset(bufp->datap + bufferPos, 0,
- buf_bufferSize - bufferPos);
- }
-
- lock_ReleaseWrite(&buf_globalLock);
+ bufferPos = sizep->LowPart & (cm_data.buf_blockSize - 1);
+ osi_assert(bufferPos != 0);
+ memset(bufp->datap + bufferPos, 0,
+ cm_data.buf_blockSize - bufferPos);
+ }
- }
+ lock_ReleaseWrite(&buf_globalLock);
+ }
- lock_ReleaseMutex(&scp->mx);
- lock_ReleaseMutex(&bufp->mx);
- if (!didRelease) {
- lock_ObtainWrite(&buf_globalLock);
- nbufp = bufp->fileHashp;
- if (nbufp) nbufp->refCount++;
- buf_LockedRelease(bufp);
- lock_ReleaseWrite(&buf_globalLock);
- }
-
- /* bail out early if we fail */
- if (code) {
- /* at this point, nbufp is held; bufp has already been
- * released.
- */
- if (nbufp) buf_Release(nbufp);
- return code;
- }
- }
-
- /* success */
- return 0;
+ lock_ReleaseMutex(&scp->mx);
+ lock_ReleaseMutex(&bufp->mx);
+ if (!didRelease) {
+ lock_ObtainWrite(&buf_globalLock);
+ nbufp = bufp->fileHashp;
+ if (nbufp) nbufp->refCount++;
+ buf_LockedRelease(bufp);
+ lock_ReleaseWrite(&buf_globalLock);
+ }
+
+ /* bail out early if we fail */
+ if (code) {
+ /* at this point, nbufp is held; bufp has already been
+ * released.
+ */
+ if (nbufp)
+ buf_Release(nbufp);
+
+#ifdef TESTING
+ buf_ValidateBufQueues();
+#endif /* TESTING */
+
+ return code;
+ }
+ }
+
+#ifdef TESTING
+ buf_ValidateBufQueues();
+#endif /* TESTING */
+
+ /* success */
+ return 0;
}
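+
+/* The partial-page case above depends on cm_data.buf_blockSize being a
+ * power of two, so that masking with (blockSize - 1) yields the offset
+ * within the buffer.  A worked sketch, not compiled; the numbers are
+ * example values only.
+ */
+#if 0
+static void example_ZeroPageTail(cm_buf_t *bufp, osi_hyper_t *sizep)
+{
+    long bufferPos;
+
+    /* with blockSize 0x1000 and a new file size of 0x2345:
+     * bufferPos = 0x2345 & 0x0FFF = 0x345, so the final
+     * 0x1000 - 0x345 bytes of the page are zeroed.
+     */
+    bufferPos = sizep->LowPart & (cm_data.buf_blockSize - 1);
+    memset(bufp->datap + bufferPos, 0, cm_data.buf_blockSize - bufferPos);
+}
+#endif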
long buf_FlushCleanPages(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
{
- long code;
- cm_buf_t *bp; /* buffer we're hacking on */
- cm_buf_t *nbp;
- int didRelease;
- long i;
-
- i = BUF_FILEHASH(&scp->fid);
-
- code = 0;
- lock_ObtainWrite(&buf_globalLock);
- bp = buf_fileHashTablepp[i];
- if (bp) bp->refCount++;
- lock_ReleaseWrite(&buf_globalLock);
- for(; bp; bp = nbp) {
- didRelease = 0; /* haven't released this buffer yet */
-
- /* clean buffer synchronously */
- if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
- lock_ObtainMutex(&bp->mx);
-
- /* start cleaning the buffer, and wait for it to finish */
- buf_LockedCleanAsync(bp, reqp);
- buf_WaitIO(bp);
- lock_ReleaseMutex(&bp->mx);
-
- code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
- if (code) goto skip;
-
- lock_ObtainWrite(&buf_globalLock);
- /* actually, we only know that buffer is clean if ref
- * count is 1, since we don't have buffer itself locked.
- */
- if (!(bp->flags & CM_BUF_DIRTY)) {
- if (bp->refCount == 1) { /* bp is held above */
- buf_LockedRelease(bp);
- nbp = bp->fileHashp;
- if (nbp) nbp->refCount++;
- didRelease = 1;
- buf_Recycle(bp);
- }
- }
- lock_ReleaseWrite(&buf_globalLock);
-
- (*cm_buf_opsp->Unstabilizep)(scp, userp);
- }
-
-skip:
- if (!didRelease) {
- lock_ObtainWrite(&buf_globalLock);
- if (nbp = bp->fileHashp) nbp->refCount++;
- buf_LockedRelease(bp);
- lock_ReleaseWrite(&buf_globalLock);
- }
- } /* for loop over a bunch of buffers */
-
- /* done */
- return code;
-}
+ long code;
+ cm_buf_t *bp; /* buffer we're hacking on */
+ cm_buf_t *nbp;
+ int didRelease;
+ long i;
+
+ i = BUF_FILEHASH(&scp->fid);
+
+ code = 0;
+ lock_ObtainWrite(&buf_globalLock);
+ bp = cm_data.buf_fileHashTablepp[i];
+ if (bp)
+ bp->refCount++;
+ lock_ReleaseWrite(&buf_globalLock);
+ for (; bp; bp = nbp) {
+ didRelease = 0; /* haven't released this buffer yet */
+
+ /* clean buffer synchronously */
+ if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
+ lock_ObtainMutex(&bp->mx);
+
+ /* start cleaning the buffer, and wait for it to finish */
+ buf_LockedCleanAsync(bp, reqp);
+ buf_WaitIO(scp, bp);
+ lock_ReleaseMutex(&bp->mx);
+
+ code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
+ if (code)
+ goto skip;
+
+ lock_ObtainWrite(&buf_globalLock);
+ /* actually, we only know that buffer is clean if ref
+ * count is 1, since we don't have buffer itself locked.
+ */
+ if (!(bp->flags & CM_BUF_DIRTY)) {
+ if (bp->refCount == 1) { /* bp is held above */
+ buf_LockedRelease(bp);
+ nbp = bp->fileHashp;
+ if (nbp)
+ nbp->refCount++;
+ didRelease = 1;
+ buf_Recycle(bp);
+ }
+ }
+ lock_ReleaseWrite(&buf_globalLock);
+
+ (*cm_buf_opsp->Unstabilizep)(scp, userp);
+ }
+
+ skip:
+ if (!didRelease) {
+ lock_ObtainWrite(&buf_globalLock);
+            if ((nbp = bp->fileHashp))
+ nbp->refCount++;
+ buf_LockedRelease(bp);
+ lock_ReleaseWrite(&buf_globalLock);
+ }
+ } /* for loop over a bunch of buffers */
+
+#ifdef TESTING
+ buf_ValidateBufQueues();
+#endif /* TESTING */
+
+ /* done */
+ return code;
+}
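+
+/* buf_Truncate, buf_FlushCleanPages, and buf_CleanVnode all walk a file
+ * hash chain with the same hand-over-hand reference pattern: take a
+ * reference on the successor under buf_globalLock before releasing the
+ * current buffer, so the chain cannot be pulled out from under the walker.
+ * Distilled sketch, not compiled; the function name is illustrative.
+ */
+#if 0
+static void example_WalkFileHashChain(cm_scache_t *scp)
+{
+    cm_buf_t *bp, *nbp;
+    long i = BUF_FILEHASH(&scp->fid);
+
+    lock_ObtainWrite(&buf_globalLock);
+    bp = cm_data.buf_fileHashTablepp[i];
+    if (bp)
+        bp->refCount++;                 /* hold the head of the chain */
+    lock_ReleaseWrite(&buf_globalLock);
+
+    for (; bp; bp = nbp) {
+        /* ... work on bp without holding buf_globalLock ... */
+
+        lock_ObtainWrite(&buf_globalLock);
+        nbp = bp->fileHashp;
+        if (nbp)
+            nbp->refCount++;            /* hold the successor first */
+        buf_LockedRelease(bp);          /* then drop the current buffer */
+        lock_ReleaseWrite(&buf_globalLock);
+    }
+}
+#endif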
long buf_CleanVnode(struct cm_scache *scp, cm_user_t *userp, cm_req_t *reqp)
{
- long code;
- cm_buf_t *bp; /* buffer we're hacking on */
- cm_buf_t *nbp; /* next one */
- long i;
+ long code;
+ cm_buf_t *bp; /* buffer we're hacking on */
+ cm_buf_t *nbp; /* next one */
+ long i;
- i = BUF_FILEHASH(&scp->fid);
+ i = BUF_FILEHASH(&scp->fid);
- code = 0;
- lock_ObtainWrite(&buf_globalLock);
- bp = buf_fileHashTablepp[i];
- if (bp) bp->refCount++;
+ code = 0;
+ lock_ObtainWrite(&buf_globalLock);
+ bp = cm_data.buf_fileHashTablepp[i];
+ if (bp)
+ bp->refCount++;
+ lock_ReleaseWrite(&buf_globalLock);
+ for (; bp; bp = nbp) {
+ /* clean buffer synchronously */
+ if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
+ if (userp) {
+ cm_HoldUser(userp);
+ lock_ObtainMutex(&bp->mx);
+ if (bp->userp)
+ cm_ReleaseUser(bp->userp);
+ bp->userp = userp;
+ lock_ReleaseMutex(&bp->mx);
+ }
+ buf_CleanAsync(bp, reqp);
+ buf_CleanWait(scp, bp);
+ lock_ObtainMutex(&bp->mx);
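+            /* fold the first I/O error into our return code; if the
+             * buffer's error field is zero, -1 marks "failed, no code".
+             */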
+ if (bp->flags & CM_BUF_ERROR) {
+ if (code == 0 || code == -1)
+ code = bp->error;
+ if (code == 0)
+ code = -1;
+ }
+ lock_ReleaseMutex(&bp->mx);
+ }
+
+ lock_ObtainWrite(&buf_globalLock);
+ buf_LockedRelease(bp);
+ nbp = bp->fileHashp;
+ if (nbp)
+ nbp->refCount++;
lock_ReleaseWrite(&buf_globalLock);
- for(; bp; bp = nbp) {
- /* clean buffer synchronously */
- if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
- if (userp) {
- lock_ObtainMutex(&bp->mx);
- if (bp->userp) cm_ReleaseUser(bp->userp);
- bp->userp = userp;
- lock_ReleaseMutex(&bp->mx);
- cm_HoldUser(userp);
- }
- buf_CleanAsync(bp, reqp);
- buf_CleanWait(bp);
- lock_ObtainMutex(&bp->mx);
- if (bp->flags & CM_BUF_ERROR) {
- if (code == 0 || code == -1) code = bp->error;
- if (code == 0) code = -1;
- }
- lock_ReleaseMutex(&bp->mx);
- }
-
- lock_ObtainWrite(&buf_globalLock);
- buf_LockedRelease(bp);
- nbp = bp->fileHashp;
- if (nbp) nbp->refCount++;
- lock_ReleaseWrite(&buf_globalLock);
- } /* for loop over a bunch of buffers */
-
- /* done */
- return code;
+ } /* for loop over a bunch of buffers */
+
+#ifdef TESTING
+ buf_ValidateBufQueues();
+#endif /* TESTING */
+
+ /* done */
+ return code;
}
+#ifdef TESTING
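+/* Debug-build consistency check: the free list must yield the same count
+ * whether walked from buf_freeListp forward or buf_freeListEndp backward,
+ * and the allp chain must contain exactly cm_data.buf_nbuffers buffers;
+ * any mismatch triggers DebugBreak().
+ */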
+void
+buf_ValidateBufQueues(void)
+{
+    cm_buf_t *bp, *bpb, *bpf, *bpa;     /* bpb/bpf/bpa: last node of each walk, for debugger inspection */
+ afs_uint32 countf=0, countb=0, counta=0;
+
+ lock_ObtainRead(&buf_globalLock);
+ for (bp = cm_data.buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
+ if (bp->magic != CM_BUF_MAGIC)
+ DebugBreak();
+ countb++;
+ bpb = bp;
+ }
+
+ for (bp = cm_data.buf_freeListp; bp; bp=(cm_buf_t *) osi_QNext(&bp->q)) {
+ if (bp->magic != CM_BUF_MAGIC)
+ DebugBreak();
+ countf++;
+ bpf = bp;
+ }
+
+ for (bp = cm_data.buf_allp; bp; bp=bp->allp) {
+ if (bp->magic != CM_BUF_MAGIC)
+ DebugBreak();
+ counta++;
+ bpa = bp;
+ }
+ lock_ReleaseRead(&buf_globalLock);
+
+ if (countb != countf)
+ DebugBreak();
+
+ if (counta != cm_data.buf_nbuffers)
+ DebugBreak();
+}
+#endif /* TESTING */
+
/* dump the contents of the buf_hashTablepp. */
-int cm_DumpBufHashTable(FILE *outputFile, char *cookie)
+int cm_DumpBufHashTable(FILE *outputFile, char *cookie, int lock)
{
int zilch;
cm_buf_t *bp;
char output[1024];
int i;
- lock_ObtainRead(&buf_globalLock);
+ if (cm_data.buf_hashTablepp == NULL)
+ return -1;
+
+ if (lock)
+ lock_ObtainRead(&buf_globalLock);
- sprintf(output, "%s - dumping buf_HashTable - buf_hashSize=%d\n", cookie, buf_hashSize);
+ StringCbPrintfA(output, sizeof(output), "%s - dumping buf_HashTable - buf_hashSize=%d\n",
+ cookie, cm_data.buf_hashSize);
WriteFile(outputFile, output, strlen(output), &zilch, NULL);
- for (i = 0; i < buf_hashSize; i++)
+ for (i = 0; i < cm_data.buf_hashSize; i++)
{
- for(bp = buf_hashTablepp[i]; bp; bp=bp->hashp)
+ for (bp = cm_data.buf_hashTablepp[i]; bp; bp=bp->hashp)
{
if (bp->refCount)
{
- sprintf(output, "%s bp=0x%08X, hash=%d, fid (cell=%d, volume=%d,"
- "vnode=%d, unique=%d), size=%d refCount=%d\n",
+                StringCbPrintfA(output, sizeof(output),
+                                "%s bp=0x%08X, hash=%d, fid (cell=%d, volume=%d, "
+                                "vnode=%d, unique=%d), size=%d refCount=%d\n",
cookie, (void *)bp, i, bp->fid.cell, bp->fid.volume,
bp->fid.vnode, bp->fid.unique, bp->size, bp->refCount);
WriteFile(outputFile, output, strlen(output), &zilch, NULL);
}
        }
    }
- sprintf(output, "%s - Done dumping buf_HashTable.\n", cookie);
+ StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_HashTable.\n", cookie);
WriteFile(outputFile, output, strlen(output), &zilch, NULL);
- lock_ReleaseRead(&buf_globalLock);
+ if (lock)
+ lock_ReleaseRead(&buf_globalLock);
+ return 0;
}
+void buf_ForceTrace(BOOL flush)
+{
+ HANDLE handle;
+ int len;
+ char buf[256];
+
+ if (!buf_logp)
+ return;
+
+    /* reserve room for the filename suffix; StringCbCopyA bounds the copy anyway */
+    len = GetTempPath(sizeof(buf) - sizeof("/afs-buffer.log"), buf);
+    StringCbCopyA(&buf[len], sizeof(buf)-len, "/afs-buffer.log");
+ handle = CreateFile(buf, GENERIC_WRITE, FILE_SHARE_READ,
+ NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
+ if (handle == INVALID_HANDLE_VALUE) {
+ osi_panic("Cannot create log file", __FILE__, __LINE__);
+ }
+ osi_LogPrint(buf_logp, handle);
+ if (flush)
+ FlushFileBuffers(handle);
+ CloseHandle(handle);
+}
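+
+/* Sketch of an intended call site for buf_ForceTrace, not compiled; the
+ * trigger condition is an assumption.  A failure path can snapshot the
+ * in-memory trace to %TEMP%\afs-buffer.log on demand.
+ */
+#if 0
+    if (fatalError)
+        buf_ForceTrace(TRUE);           /* TRUE also flushes file buffers */
+#endif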