X-Git-Url: https://git.openafs.org/?p=openafs.git;a=blobdiff_plain;f=src%2FWINNT%2Fafsd%2Fcm_buf.c;h=ed31f3733f3cb1e9aa53812daec10425ea9f83ea;hp=389c7f6474ae96b20ebb5750c9272a71718fcbab;hb=d3d4af7795dd7371750bfdb9612b6419561cf32b;hpb=d64679ee4c125f6df5772007b69a9d7a1b69c32e diff --git a/src/WINNT/afsd/cm_buf.c b/src/WINNT/afsd/cm_buf.c index 389c7f6..ed31f37 100644 --- a/src/WINNT/afsd/cm_buf.c +++ b/src/WINNT/afsd/cm_buf.c @@ -19,9 +19,14 @@ #include #include #include +#include #include "afsd.h" +#ifdef DEBUG +#define TRACE_BUFFER 1 +#endif + extern void afsi_log(char *pattern, ...); /* This module implements the buffer package used by the local transaction @@ -75,11 +80,15 @@ long buf_nbuffers = CM_BUF_BUFFERS; long buf_nOrigBuffers; long buf_bufferSize = CM_BUF_SIZE; long buf_hashSize = CM_BUF_HASHSIZE; +int buf_cacheType = CM_BUF_CACHETYPE_FILE; #ifndef DJGPP static HANDLE CacheHandle; +static +VOID * ViewOfFile; + static SYSTEM_INFO sysInfo; #endif /* !DJGPP */ @@ -109,58 +118,58 @@ extern int cm_diskCacheEnabled; /* hold a reference to an already held buffer */ void buf_Hold(cm_buf_t *bp) { - lock_ObtainWrite(&buf_globalLock); - bp->refCount++; - lock_ReleaseWrite(&buf_globalLock); + lock_ObtainWrite(&buf_globalLock); + bp->refCount++; + lock_ReleaseWrite(&buf_globalLock); } /* incremental sync daemon. Writes 1/10th of all the buffers every 5000 ms */ void buf_IncrSyncer(long parm) { - cm_buf_t *bp; /* buffer we're hacking on; held */ - long i; /* counter */ - long nAtOnce; /* how many to do at once */ - cm_req_t req; - - lock_ObtainWrite(&buf_globalLock); - bp = buf_allp; - bp->refCount++; - lock_ReleaseWrite(&buf_globalLock); - nAtOnce = buf_nbuffers / 10; - while (1) { + cm_buf_t *bp; /* buffer we're hacking on; held */ + long i; /* counter */ + long nAtOnce; /* how many to do at once */ + cm_req_t req; + + lock_ObtainWrite(&buf_globalLock); + bp = buf_allp; + bp->refCount++; + lock_ReleaseWrite(&buf_globalLock); + nAtOnce = buf_nbuffers / 10; + while (1) { #ifndef DJGPP - i = SleepEx(5000, 1); - if (i != 0) continue; + i = SleepEx(5000, 1); + if (i != 0) continue; #else - thrd_Sleep(5000); + thrd_Sleep(5000); #endif /* DJGPP */ - /* now go through our percentage of the buffers */ - for(i=0; iallp; - if (!bp) bp = buf_allp; - bp->refCount++; - lock_ReleaseWrite(&buf_globalLock); - } /* for loop over a bunch of buffers */ - } /* whole daemon's while loop */ + /* now go through our percentage of the buffers */ + for(i=0; iallp; + if (!bp) bp = buf_allp; + bp->refCount++; + lock_ReleaseWrite(&buf_globalLock); + } /* for loop over a bunch of buffers */ + } /* whole daemon's while loop */ } #ifndef DJGPP @@ -171,58 +180,58 @@ void buf_IncrSyncer(long parm) */ PSECURITY_ATTRIBUTES CreateCacheFileSA() { - PSECURITY_ATTRIBUTES psa; - PSECURITY_DESCRIPTOR psd; - SID_IDENTIFIER_AUTHORITY authority = SECURITY_NT_AUTHORITY; - PSID AdminSID; - DWORD AdminSIDlength; - PACL AdminOnlyACL; - DWORD ACLlength; - - /* Get Administrator SID */ - AllocateAndInitializeSid(&authority, 2, - SECURITY_BUILTIN_DOMAIN_RID, - DOMAIN_ALIAS_RID_ADMINS, - 0, 0, 0, 0, 0, 0, - &AdminSID); - - /* Create Administrator-only ACL */ - AdminSIDlength = GetLengthSid(AdminSID); - ACLlength = sizeof(ACL) + sizeof(ACCESS_ALLOWED_ACE) - + AdminSIDlength - sizeof(DWORD); - AdminOnlyACL = GlobalAlloc(GMEM_FIXED, ACLlength); - InitializeAcl(AdminOnlyACL, ACLlength, ACL_REVISION); - AddAccessAllowedAce(AdminOnlyACL, ACL_REVISION, - STANDARD_RIGHTS_ALL | SPECIFIC_RIGHTS_ALL, - AdminSID); - - /* Create security descriptor */ - psd 
= GlobalAlloc(GMEM_FIXED, sizeof(SECURITY_DESCRIPTOR)); - InitializeSecurityDescriptor(psd, SECURITY_DESCRIPTOR_REVISION); - SetSecurityDescriptorDacl(psd, TRUE, AdminOnlyACL, FALSE); - - /* Create security attributes structure */ - psa = GlobalAlloc(GMEM_FIXED, sizeof(SECURITY_ATTRIBUTES)); - psa->nLength = sizeof(SECURITY_ATTRIBUTES); - psa->lpSecurityDescriptor = psd; - psa->bInheritHandle = TRUE; - - return psa; -} + PSECURITY_ATTRIBUTES psa; + PSECURITY_DESCRIPTOR psd; + SID_IDENTIFIER_AUTHORITY authority = SECURITY_NT_AUTHORITY; + PSID AdminSID; + DWORD AdminSIDlength; + PACL AdminOnlyACL; + DWORD ACLlength; + + /* Get Administrator SID */ + AllocateAndInitializeSid(&authority, 2, + SECURITY_BUILTIN_DOMAIN_RID, + DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &AdminSID); + + /* Create Administrator-only ACL */ + AdminSIDlength = GetLengthSid(AdminSID); + ACLlength = sizeof(ACL) + sizeof(ACCESS_ALLOWED_ACE) + + AdminSIDlength - sizeof(DWORD); + AdminOnlyACL = GlobalAlloc(GMEM_FIXED, ACLlength); + InitializeAcl(AdminOnlyACL, ACLlength, ACL_REVISION); + AddAccessAllowedAce(AdminOnlyACL, ACL_REVISION, + STANDARD_RIGHTS_ALL | SPECIFIC_RIGHTS_ALL, + AdminSID); + + /* Create security descriptor */ + psd = GlobalAlloc(GMEM_FIXED, sizeof(SECURITY_DESCRIPTOR)); + InitializeSecurityDescriptor(psd, SECURITY_DESCRIPTOR_REVISION); + SetSecurityDescriptorDacl(psd, TRUE, AdminOnlyACL, FALSE); + + /* Create security attributes structure */ + psa = GlobalAlloc(GMEM_FIXED, sizeof(SECURITY_ATTRIBUTES)); + psa->nLength = sizeof(SECURITY_ATTRIBUTES); + psa->lpSecurityDescriptor = psd; + psa->bInheritHandle = TRUE; + + return psa; +} #endif /* !DJGPP */ #ifndef DJGPP /* Free a security attribute structure created by CreateCacheFileSA() */ VOID FreeCacheFileSA(PSECURITY_ATTRIBUTES psa) { - BOOL b1, b2; - PACL pAcl; - - GetSecurityDescriptorDacl(psa->lpSecurityDescriptor, &b1, &pAcl, &b2); - GlobalFree(pAcl); - GlobalFree(psa->lpSecurityDescriptor); - GlobalFree(psa); -} + BOOL b1, b2; + PACL pAcl; + + GetSecurityDescriptorDacl(psa->lpSecurityDescriptor, &b1, &pAcl, &b2); + GlobalFree(pAcl); + GlobalFree(psa->lpSecurityDescriptor); + GlobalFree(psa); +} #endif /* !DJGPP */ /* initialize the buffer package; called with no locks @@ -230,163 +239,183 @@ VOID FreeCacheFileSA(PSECURITY_ATTRIBUTES psa) */ long buf_Init(cm_buf_ops_t *opsp) { - static osi_once_t once; - cm_buf_t *bp; - long sectorSize; - thread_t phandle; + static osi_once_t once; + cm_buf_t *bp; + long sectorSize; + thread_t phandle; #ifndef DJGPP - HANDLE hf, hm; - PSECURITY_ATTRIBUTES psa; + HANDLE hf, hm; + PSECURITY_ATTRIBUTES psa; #endif /* !DJGPP */ - long i; - unsigned long pid; - char *data; - long cs; + long i; + unsigned long pid; + char *data; + long cs; #ifndef DJGPP - /* Get system info; all we really want is the allocation granularity */ - GetSystemInfo(&sysInfo); + /* Get system info; all we really want is the allocation granularity */ + GetSystemInfo(&sysInfo); #endif /* !DJGPP */ - /* Have to be able to reserve a whole chunk */ - if (((buf_nbuffers - 3) * buf_bufferSize) < cm_chunkSize) - return CM_ERROR_TOOFEWBUFS; + /* Have to be able to reserve a whole chunk */ + if (((buf_nbuffers - 3) * buf_bufferSize) < cm_chunkSize) + return CM_ERROR_TOOFEWBUFS; - /* recall for callouts */ - cm_buf_opsp = opsp; + /* recall for callouts */ + cm_buf_opsp = opsp; - if (osi_Once(&once)) { - /* initialize global locks */ - lock_InitializeRWLock(&buf_globalLock, "Global buffer lock"); + if (osi_Once(&once)) { + /* initialize global locks */ + 
lock_InitializeRWLock(&buf_globalLock, "Global buffer lock"); #ifndef DJGPP - /* - * Cache file mapping constrained by - * system allocation granularity; - * round up, assuming granularity is a power of two - */ - cs = buf_nbuffers * buf_bufferSize; - cs = (cs + (sysInfo.dwAllocationGranularity - 1)) - & ~(sysInfo.dwAllocationGranularity - 1); - if (cs != buf_nbuffers * buf_bufferSize) { - buf_nbuffers = cs / buf_bufferSize; - afsi_log("Cache size rounded up to %d buffers", - buf_nbuffers); - } + /* + * Cache file mapping constrained by + * system allocation granularity; + * round up, assuming granularity is a power of two + */ + cs = buf_nbuffers * buf_bufferSize; + cs = (cs + (sysInfo.dwAllocationGranularity - 1)) + & ~(sysInfo.dwAllocationGranularity - 1); + if (cs != buf_nbuffers * buf_bufferSize) { + buf_nbuffers = cs / buf_bufferSize; + afsi_log("Cache size rounded up to %d buffers", + buf_nbuffers); + } #endif /* !DJGPP */ - /* remember this for those who want to reset it */ - buf_nOrigBuffers = buf_nbuffers; + /* remember this for those who want to reset it */ + buf_nOrigBuffers = buf_nbuffers; - /* lower hash size to a prime number */ - buf_hashSize = osi_PrimeLessThan(buf_hashSize); + /* lower hash size to a prime number */ + buf_hashSize = osi_PrimeLessThan(buf_hashSize); - /* create hash table */ - buf_hashTablepp = malloc(buf_hashSize * sizeof(cm_buf_t *)); - memset((void *)buf_hashTablepp, 0, - buf_hashSize * sizeof(cm_buf_t *)); + /* create hash table */ + buf_hashTablepp = malloc(buf_hashSize * sizeof(cm_buf_t *)); + memset((void *)buf_hashTablepp, 0, + buf_hashSize * sizeof(cm_buf_t *)); - /* another hash table */ - buf_fileHashTablepp = malloc(buf_hashSize * sizeof(cm_buf_t *)); - memset((void *)buf_fileHashTablepp, 0, - buf_hashSize * sizeof(cm_buf_t *)); + /* another hash table */ + buf_fileHashTablepp = malloc(buf_hashSize * sizeof(cm_buf_t *)); + memset((void *)buf_fileHashTablepp, 0, + buf_hashSize * sizeof(cm_buf_t *)); - /* min value for which this works */ - sectorSize = 1; + /* min value for which this works */ + sectorSize = 1; #ifndef DJGPP - /* Reserve buffer space by mapping cache file */ - psa = CreateCacheFileSA(); - hf = CreateFile(cm_CachePath, - GENERIC_READ | GENERIC_WRITE, - FILE_SHARE_READ | FILE_SHARE_WRITE, - psa, - OPEN_ALWAYS, - FILE_ATTRIBUTE_NORMAL, - NULL); - if (hf == INVALID_HANDLE_VALUE) { - afsi_log("create file error %d", GetLastError()); - return CM_ERROR_INVAL; - } - FreeCacheFileSA(psa); - CacheHandle = hf; - hm = CreateFileMapping(hf, - NULL, - PAGE_READWRITE, - 0, buf_nbuffers * buf_bufferSize, - NULL); - if (hm == NULL) { - if (GetLastError() == ERROR_DISK_FULL) { - afsi_log("Error creating cache file mapping: disk full"); - return CM_ERROR_TOOMANYBUFS; - } - return CM_ERROR_INVAL; - } - data = MapViewOfFile(hm, - FILE_MAP_ALL_ACCESS, - 0, 0, - buf_nbuffers * buf_bufferSize); - if (data == NULL) { - CloseHandle(hf); - CloseHandle(hm); - return CM_ERROR_INVAL; - } - CloseHandle(hm); -#else - /* djgpp doesn't support memory mapped files */ - data = malloc(buf_nbuffers * buf_bufferSize); + if (buf_cacheType == CM_BUF_CACHETYPE_FILE) { + /* Reserve buffer space by mapping cache file */ + psa = CreateCacheFileSA(); + hf = CreateFile(cm_CachePath, + GENERIC_READ | GENERIC_WRITE, + FILE_SHARE_READ | FILE_SHARE_WRITE, + psa, + OPEN_ALWAYS, + FILE_ATTRIBUTE_NORMAL, + NULL); + FreeCacheFileSA(psa); + if (hf == INVALID_HANDLE_VALUE) { + afsi_log("Error creating cache file \"%s\" error %d", + cm_CachePath, GetLastError()); + return 
CM_ERROR_INVAL; + } + } else { /* buf_cacheType == CM_BUF_CACHETYPE_VIRTUAL */ + hf = INVALID_HANDLE_VALUE; + } + CacheHandle = hf; + hm = CreateFileMapping(hf, + NULL, + PAGE_READWRITE, + 0, buf_nbuffers * buf_bufferSize, + NULL); + if (hm == NULL) { + if (GetLastError() == ERROR_DISK_FULL) { + afsi_log("Error creating cache file \"%s\" mapping: disk full", + cm_CachePath); + return CM_ERROR_TOOMANYBUFS; + } + return CM_ERROR_INVAL; + } + ViewOfFile = MapViewOfFile(hm, + FILE_MAP_ALL_ACCESS, + 0, 0, + buf_nbuffers * buf_bufferSize); + if (ViewOfFile == NULL) { + afsi_log("Error mapping view of file: 0x%X", GetLastError()); + if (hf != INVALID_HANDLE_VALUE) + CloseHandle(hf); + CloseHandle(hm); + return CM_ERROR_INVAL; + } + CloseHandle(hm); + + data = ViewOfFile; +#else + /* djgpp doesn't support memory mapped files */ + data = malloc(buf_nbuffers * buf_bufferSize); #endif /* !DJGPP */ - /* create buffer headers and put in free list */ - bp = malloc(buf_nbuffers * sizeof(cm_buf_t)); - buf_allp = NULL; - for(i=0; iallp = buf_allp; - buf_allp = bp; - - osi_QAdd((osi_queue_t **)&buf_freeListp, &bp->q); - bp->flags |= CM_BUF_INLRU; - lock_InitializeMutex(&bp->mx, "Buffer mutex"); + /* thread on list of all buffers */ + bp->allp = buf_allp; + buf_allp = bp; - /* grab appropriate number of bytes from aligned zone */ - bp->datap = data; + osi_QAdd((osi_queue_t **)&buf_freeListp, &bp->q); + bp->flags |= CM_BUF_INLRU; + lock_InitializeMutex(&bp->mx, "Buffer mutex"); - /* setup last buffer pointer */ - if (i == 0) - buf_freeListEndp = bp; + /* grab appropriate number of bytes from aligned zone */ + bp->datap = data; - /* next */ - bp++; - data += buf_bufferSize; - } - - /* none reserved at first */ - buf_reservedBufs = 0; - - /* just for safety's sake */ - buf_maxReservedBufs = buf_nbuffers - 3; - - /* init the buffer trace log */ - buf_logp = osi_LogCreate("buffer", 10); + /* setup last buffer pointer */ + if (i == 0) + buf_freeListEndp = bp; - osi_EndOnce(&once); - - /* and create the incr-syncer */ - phandle = thrd_Create(0, 0, - (ThreadFunc) buf_IncrSyncer, 0, 0, &pid, - "buf_IncrSyncer"); + /* next */ + bp++; + data += buf_bufferSize; + } - osi_assertx(phandle != NULL, "buf: can't create incremental sync proc"); + /* none reserved at first */ + buf_reservedBufs = 0; + + /* just for safety's sake */ + buf_maxReservedBufs = buf_nbuffers - 3; + +#ifdef TRACE_BUFFER + /* init the buffer trace log */ + buf_logp = osi_LogCreate("buffer", 1000); + osi_LogEnable(buf_logp); +#endif + + osi_EndOnce(&once); + + /* and create the incr-syncer */ + phandle = thrd_Create(0, 0, + (ThreadFunc) buf_IncrSyncer, 0, 0, &pid, + "buf_IncrSyncer"); + + osi_assertx(phandle != NULL, "buf: can't create incremental sync proc"); #ifndef DJGPP - CloseHandle(phandle); + CloseHandle(phandle); #endif /* !DJGPP */ - } + } - return 0; + return 0; +} + +void +buf_Shutdown(void) +{ + UnmapViewOfFile(ViewOfFile); + CloseHandle(CacheHandle); } /* add nbuffers to the buffer pool, if possible. 
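
The CM_BUF_CACHETYPE_VIRTUAL branch above leans on a documented Win32 behavior: passing INVALID_HANDLE_VALUE as the file handle makes CreateFileMapping() back the mapping with the system paging file instead of an on-disk cache file, so the same CreateFileMapping()/MapViewOfFile() path serves both cache types. A minimal standalone sketch of that technique (not part of the patch; the size and error handling are illustrative only):

    #include <windows.h>
    #include <stdio.h>

    int main(void)
    {
        DWORD size = 64 * 1024;   /* example size */
        HANDLE hm;
        void *view;

        /* INVALID_HANDLE_VALUE: no disk file, backed by the paging file */
        hm = CreateFileMapping(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE,
                               0, size, NULL);
        if (hm == NULL) {
            printf("CreateFileMapping failed: %lu\n", GetLastError());
            return 1;
        }
        view = MapViewOfFile(hm, FILE_MAP_ALL_ACCESS, 0, 0, size);
        if (view == NULL) {
            printf("MapViewOfFile failed: %lu\n", GetLastError());
            CloseHandle(hm);
            return 1;
        }

        ((char *)view)[0] = 1;    /* usable like ordinary memory */

        UnmapViewOfFile(view);
        CloseHandle(hm);
        return 0;
    }

Note that buf_Init can close the mapping handle immediately after MapViewOfFile(): a mapped view keeps the underlying section alive until UnmapViewOfFile(), which is why the new buf_Shutdown() needs only ViewOfFile and CacheHandle.
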
@@ -394,102 +423,114 @@ long buf_Init(cm_buf_ops_t *opsp) */ long buf_AddBuffers(long nbuffers) { - cm_buf_t *bp; - int i; - char *data; + cm_buf_t *bp; + int i; + char *data; #ifndef DJGPP - HANDLE hm; - long cs; + HANDLE hm; + long cs; afsi_log("%d buffers being added to the existing cache of size %d", nbuffers, buf_nbuffers); - /* - * Cache file mapping constrained by - * system allocation granularity; - * round up, assuming granularity is a power of two; - * assume existing cache size is already rounded + if (buf_cacheType == CM_BUF_CACHETYPE_VIRTUAL) { + /* The size of a virtual cache cannot be changed after it has + * been created. Subsequent calls to MapViewofFile() with + * an existing mapping object name would not allow the + * object to be resized. Return failure immediately. */ - cs = nbuffers * buf_bufferSize; - cs = (cs + (sysInfo.dwAllocationGranularity - 1)) - & ~(sysInfo.dwAllocationGranularity - 1); - if (cs != nbuffers * buf_bufferSize) { - nbuffers = cs / buf_bufferSize; - } - - /* Reserve additional buffer space by remapping cache file */ - hm = CreateFileMapping(CacheHandle, - NULL, - PAGE_READWRITE, - 0, (buf_nbuffers + nbuffers) * buf_bufferSize, - NULL); - if (hm == NULL) { - if (GetLastError() == ERROR_DISK_FULL) - return CM_ERROR_TOOMANYBUFS; - else - return CM_ERROR_INVAL; - } - data = MapViewOfFile(hm, - FILE_MAP_ALL_ACCESS, - 0, buf_nbuffers * buf_bufferSize, - nbuffers * buf_bufferSize); - if (data == NULL) { - CloseHandle(hm); - return CM_ERROR_INVAL; - } - CloseHandle(hm); + return CM_ERROR_INVAL; + } + + /* + * Cache file mapping constrained by + * system allocation granularity; + * round up, assuming granularity is a power of two; + * assume existing cache size is already rounded + */ + cs = nbuffers * buf_bufferSize; + cs = (cs + (sysInfo.dwAllocationGranularity - 1)) + & ~(sysInfo.dwAllocationGranularity - 1); + if (cs != nbuffers * buf_bufferSize) { + nbuffers = cs / buf_bufferSize; + } + + /* Reserve additional buffer space by remapping cache file */ + hm = CreateFileMapping(CacheHandle, + NULL, + PAGE_READWRITE, + 0, (buf_nbuffers + nbuffers) * buf_bufferSize, + NULL); + if (hm == NULL) { + if (GetLastError() == ERROR_DISK_FULL) + return CM_ERROR_TOOMANYBUFS; + else + return CM_ERROR_INVAL; + } + data = MapViewOfFile(hm, + FILE_MAP_ALL_ACCESS, + 0, buf_nbuffers * buf_bufferSize, + nbuffers * buf_bufferSize); + if (data == NULL) { + CloseHandle(hm); + return CM_ERROR_INVAL; + } + CloseHandle(hm); #else - data = malloc(buf_nbuffers * buf_bufferSize); + data = malloc(buf_nbuffers * buf_bufferSize); #endif /* DJGPP */ - /* Create buffer headers and put in free list */ - bp = malloc(nbuffers * sizeof(*bp)); + /* Create buffer headers and put in free list */ + bp = malloc(nbuffers * sizeof(*bp)); - for(i=0; imx, "cm_buf_t"); + lock_InitializeMutex(&bp->mx, "cm_buf_t"); - /* grab appropriate number of bytes from aligned zone */ - bp->datap = data; + /* grab appropriate number of bytes from aligned zone */ + bp->datap = data; - bp->flags |= CM_BUF_INLRU; - - lock_ObtainWrite(&buf_globalLock); - /* note that buf_allp chain is covered by buf_globalLock now */ - bp->allp = buf_allp; - buf_allp = bp; - osi_QAdd((osi_queue_t **) &buf_freeListp, &bp->q); - if (!buf_freeListEndp) buf_freeListEndp = bp; - buf_nbuffers++; - lock_ReleaseWrite(&buf_globalLock); + bp->flags |= CM_BUF_INLRU; - bp++; - data += buf_bufferSize; + lock_ObtainWrite(&buf_globalLock); + /* note that buf_allp chain is covered by buf_globalLock now */ + bp->allp = buf_allp; + buf_allp = bp; + 
osi_QAdd((osi_queue_t **) &buf_freeListp, &bp->q); + if (!buf_freeListEndp) buf_freeListEndp = bp; + buf_nbuffers++; + lock_ReleaseWrite(&buf_globalLock); + + bp++; + data += buf_bufferSize; - } /* for loop over all buffers */ + } /* for loop over all buffers */ - return 0; -} + return 0; +} /* interface to set the number of buffers to an exact figure. * Called with no locks held. */ long buf_SetNBuffers(long nbuffers) { - if (nbuffers < 10) return CM_ERROR_INVAL; - if (nbuffers == buf_nbuffers) return 0; - else if (nbuffers > buf_nbuffers) - return buf_AddBuffers(nbuffers - buf_nbuffers); - else return CM_ERROR_INVAL; + if (nbuffers < 10) + return CM_ERROR_INVAL; + if (nbuffers == buf_nbuffers) + return 0; + else if (nbuffers > buf_nbuffers) + return buf_AddBuffers(nbuffers - buf_nbuffers); + else + return CM_ERROR_INVAL; } /* release a buffer. Buffer must be referenced, but unlocked. */ void buf_Release(cm_buf_t *bp) { - lock_ObtainWrite(&buf_globalLock); - buf_LockedRelease(bp); - lock_ReleaseWrite(&buf_globalLock); + lock_ObtainWrite(&buf_globalLock); + buf_LockedRelease(bp); + lock_ReleaseWrite(&buf_globalLock); } /* wait for reading or writing to clear; called with write-locked @@ -497,10 +538,10 @@ void buf_Release(cm_buf_t *bp) */ void buf_WaitIO(cm_buf_t *bp) { - while (1) { - /* if no IO is happening, we're done */ - if (!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING))) - break; + while (1) { + /* if no IO is happening, we're done */ + if (!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING))) + break; /* otherwise I/O is happening, but some other thread is waiting for * the I/O already. Wait for that guy to figure out what happened, @@ -512,14 +553,14 @@ void buf_WaitIO(cm_buf_t *bp) bp->flags |= CM_BUF_WAITING; osi_SleepM((long) bp, &bp->mx); lock_ObtainMutex(&bp->mx); - osi_Log1(buf_logp, "buf_WaitIO conflict wait done for 0x%x", bp); + osi_Log1(buf_logp, "buf_WaitIO conflict wait done for 0x%x", bp); } /* if we get here, the IO is done, but we may have to wakeup people waiting for * the I/O to complete. Do so. */ if (bp->flags & CM_BUF_WAITING) { - bp->flags &= ~CM_BUF_WAITING; + bp->flags &= ~CM_BUF_WAITING; osi_Wakeup((long) bp); } osi_Log1(buf_logp, "WaitIO finished wait for bp 0x%x", (long) bp); @@ -528,40 +569,40 @@ void buf_WaitIO(cm_buf_t *bp) /* code to drop reference count while holding buf_globalLock */ void buf_LockedRelease(cm_buf_t *bp) { - /* ensure that we're in the LRU queue if our ref count is 0 */ - osi_assert(bp->refCount > 0); - if (--bp->refCount == 0) { - if (!(bp->flags & CM_BUF_INLRU)) { - osi_QAdd((osi_queue_t **) &buf_freeListp, &bp->q); - - /* watch for transition from empty to one element */ - if (!buf_freeListEndp) - buf_freeListEndp = buf_freeListp; - bp->flags |= CM_BUF_INLRU; - } + /* ensure that we're in the LRU queue if our ref count is 0 */ + osi_assert(bp->refCount > 0); + if (--bp->refCount == 0) { + if (!(bp->flags & CM_BUF_INLRU)) { + osi_QAdd((osi_queue_t **) &buf_freeListp, &bp->q); + + /* watch for transition from empty to one element */ + if (!buf_freeListEndp) + buf_freeListEndp = buf_freeListp; + bp->flags |= CM_BUF_INLRU; } -} + } +} /* find a buffer, if any, for a particular file ID and offset. Assumes * that buf_globalLock is write locked when called. 
*/ cm_buf_t *buf_LockedFind(struct cm_scache *scp, osi_hyper_t *offsetp) { - long i; - cm_buf_t *bp; - - i = BUF_HASH(&scp->fid, offsetp); - for(bp = buf_hashTablepp[i]; bp; bp=bp->hashp) { - if (cm_FidCmp(&scp->fid, &bp->fid) == 0 - && offsetp->LowPart == bp->offset.LowPart - && offsetp->HighPart == bp->offset.HighPart) { - bp->refCount++; - break; - } + long i; + cm_buf_t *bp; + + i = BUF_HASH(&scp->fid, offsetp); + for(bp = buf_hashTablepp[i]; bp; bp=bp->hashp) { + if (cm_FidCmp(&scp->fid, &bp->fid) == 0 + && offsetp->LowPart == bp->offset.LowPart + && offsetp->HighPart == bp->offset.HighPart) { + bp->refCount++; + break; } + } - /* return whatever we found, if anything */ - return bp; + /* return whatever we found, if anything */ + return bp; } /* find a buffer with offset *offsetp for vnode *scp. Called @@ -569,14 +610,14 @@ cm_buf_t *buf_LockedFind(struct cm_scache *scp, osi_hyper_t *offsetp) */ cm_buf_t *buf_Find(struct cm_scache *scp, osi_hyper_t *offsetp) { - cm_buf_t *bp; + cm_buf_t *bp; - lock_ObtainWrite(&buf_globalLock); - bp = buf_LockedFind(scp, offsetp); - lock_ReleaseWrite(&buf_globalLock); + lock_ObtainWrite(&buf_globalLock); + bp = buf_LockedFind(scp, offsetp); + lock_ReleaseWrite(&buf_globalLock); - return bp; -} + return bp; +} /* start cleaning I/O on this buffer. Buffer must be write locked, and is returned * write-locked. @@ -587,37 +628,38 @@ cm_buf_t *buf_Find(struct cm_scache *scp, osi_hyper_t *offsetp) */ void buf_LockedCleanAsync(cm_buf_t *bp, cm_req_t *reqp) { - long code; + long code; - code = 0; - while ((bp->flags & (CM_BUF_WRITING | CM_BUF_DIRTY)) == CM_BUF_DIRTY) { - lock_ReleaseMutex(&bp->mx); + code = 0; + while ((bp->flags & (CM_BUF_WRITING | CM_BUF_DIRTY)) == CM_BUF_DIRTY) { + lock_ReleaseMutex(&bp->mx); - code = (*cm_buf_opsp->Writep)(&bp->fid, &bp->offset, - buf_bufferSize, 0, bp->userp, - reqp); + code = (*cm_buf_opsp->Writep)(&bp->fid, &bp->offset, + buf_bufferSize, 0, bp->userp, + reqp); - lock_ObtainMutex(&bp->mx); - if (code) break; + lock_ObtainMutex(&bp->mx); + if (code) + break; #ifdef DISKCACHE95 - /* Disk cache support */ - /* write buffer to disk cache (synchronous for now) */ - diskcache_Update(bp->dcp, bp->datap, buf_bufferSize, bp->dataVersion); + /* Disk cache support */ + /* write buffer to disk cache (synchronous for now) */ + diskcache_Update(bp->dcp, bp->datap, buf_bufferSize, bp->dataVersion); #endif /* DISKCACHE95 */ - }; + }; - /* do logging after call to GetLastError, or else */ - osi_Log2(buf_logp, "buf_CleanAsync starts I/O on 0x%x, done=%d", bp, code); + /* do logging after call to GetLastError, or else */ + osi_Log2(buf_logp, "buf_CleanAsync starts I/O on 0x%x, done=%d", bp, code); - /* if someone was waiting for the I/O that just completed or failed, - * wake them up. - */ - if (bp->flags & CM_BUF_WAITING) { - /* turn off flags and wakeup users */ - bp->flags &= ~CM_BUF_WAITING; - osi_Wakeup((long) bp); - } + /* if someone was waiting for the I/O that just completed or failed, + * wake them up. + */ + if (bp->flags & CM_BUF_WAITING) { + /* turn off flags and wakeup users */ + bp->flags &= ~CM_BUF_WAITING; + osi_Wakeup((long) bp); + } } /* Called with a zero-ref count buffer and with the buf_globalLock write locked. 
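
The loop guard in buf_LockedCleanAsync() above folds two tests into one mask comparison: (flags & (CM_BUF_WRITING | CM_BUF_DIRTY)) == CM_BUF_DIRTY is true only while the buffer is dirty and no write is already in flight. A self-contained illustration of the idiom, with made-up flag values standing in for the CM_BUF_* bits:

    #include <stdio.h>

    #define BUF_DIRTY   0x01   /* stand-in for CM_BUF_DIRTY */
    #define BUF_WRITING 0x02   /* stand-in for CM_BUF_WRITING */

    /* true only when DIRTY is set and WRITING is clear */
    static int needs_clean(unsigned flags)
    {
        return (flags & (BUF_WRITING | BUF_DIRTY)) == BUF_DIRTY;
    }

    int main(void)
    {
        printf("%d\n", needs_clean(BUF_DIRTY));               /* 1 */
        printf("%d\n", needs_clean(BUF_DIRTY | BUF_WRITING)); /* 0: store in flight */
        printf("%d\n", needs_clean(BUF_WRITING));             /* 0 */
        return 0;
    }

Because the flags are rechecked each time the mutex is reacquired, a buffer whose store is already in progress is skipped rather than written twice.
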
@@ -626,64 +668,63 @@ void buf_LockedCleanAsync(cm_buf_t *bp, cm_req_t *reqp) */ void buf_Recycle(cm_buf_t *bp) { - int i; - cm_buf_t **lbpp; - cm_buf_t *tbp; - cm_buf_t *prevBp, *nextBp; - - /* if we get here, we know that the buffer still has a 0 ref count, - * and that it is clean and has no currently pending I/O. This is - * the dude to return. - * Remember that as long as the ref count is 0, we know that we won't - * have any lock conflicts, so we can grab the buffer lock out of - * order in the locking hierarchy. - */ - osi_Log2(buf_logp, - "buf_Recycle recycles 0x%x, off 0x%x", - bp, bp->offset.LowPart); - - osi_assert(bp->refCount == 0); - osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY))); - lock_AssertWrite(&buf_globalLock); - - if (bp->flags & CM_BUF_INHASH) { - /* Remove from hash */ - - i = BUF_HASH(&bp->fid, &bp->offset); - lbpp = &(buf_hashTablepp[i]); - for(tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = *lbpp) { - if (tbp == bp) break; - } - - /* we better find it */ - osi_assertx(tbp != NULL, "buf_GetNewLocked: hash table screwup"); - - *lbpp = bp->hashp; /* hash out */ - - /* Remove from file hash */ - - i = BUF_FILEHASH(&bp->fid); - prevBp = bp->fileHashBackp; - nextBp = bp->fileHashp; - if (prevBp) - prevBp->fileHashp = nextBp; - else - buf_fileHashTablepp[i] = nextBp; - if (nextBp) - nextBp->fileHashBackp = prevBp; - - bp->flags &= ~CM_BUF_INHASH; - } - - /* bump the soft reference counter now, to invalidate softRefs; no - * wakeup is required since people don't sleep waiting for this - * counter to change. - */ - bp->idCounter++; + int i; + cm_buf_t **lbpp; + cm_buf_t *tbp; + cm_buf_t *prevBp, *nextBp; + + /* if we get here, we know that the buffer still has a 0 ref count, + * and that it is clean and has no currently pending I/O. This is + * the dude to return. + * Remember that as long as the ref count is 0, we know that we won't + * have any lock conflicts, so we can grab the buffer lock out of + * order in the locking hierarchy. + */ + osi_Log2( buf_logp, "buf_Recycle recycles 0x%x, off 0x%x", + bp, bp->offset.LowPart); - /* make the fid unrecognizable */ - memset(&bp->fid, 0, sizeof(bp->fid)); -} + osi_assert(bp->refCount == 0); + osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY))); + lock_AssertWrite(&buf_globalLock); + + if (bp->flags & CM_BUF_INHASH) { + /* Remove from hash */ + + i = BUF_HASH(&bp->fid, &bp->offset); + lbpp = &(buf_hashTablepp[i]); + for(tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = *lbpp) { + if (tbp == bp) break; + } + + /* we better find it */ + osi_assertx(tbp != NULL, "buf_GetNewLocked: hash table screwup"); + + *lbpp = bp->hashp; /* hash out */ + + /* Remove from file hash */ + + i = BUF_FILEHASH(&bp->fid); + prevBp = bp->fileHashBackp; + nextBp = bp->fileHashp; + if (prevBp) + prevBp->fileHashp = nextBp; + else + buf_fileHashTablepp[i] = nextBp; + if (nextBp) + nextBp->fileHashBackp = prevBp; + + bp->flags &= ~CM_BUF_INHASH; + } + + /* bump the soft reference counter now, to invalidate softRefs; no + * wakeup is required since people don't sleep waiting for this + * counter to change. + */ + bp->idCounter++; + + /* make the fid unrecognizable */ + memset(&bp->fid, 0, sizeof(bp->fid)); +} /* recycle a buffer, removing it from the free list, hashing in its new identity * and returning it write-locked so that no one can use it. 
Called without @@ -698,153 +739,154 @@ void buf_Recycle(cm_buf_t *bp) */ long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp) { - cm_buf_t *bp; /* buffer we're dealing with */ - cm_buf_t *nextBp; /* next buffer in file hash chain */ - long i; /* temp */ - cm_req_t req; - - cm_InitReq(&req); /* just in case */ - - while(1) { -retry: - lock_ObtainWrite(&buf_globalLock); - /* check to see if we lost the race */ - if (scp) { - if (bp = buf_LockedFind(scp, offsetp)) { - bp->refCount--; - lock_ReleaseWrite(&buf_globalLock); - return CM_BUF_EXISTS; - } - } - - /* for debugging, assert free list isn't empty, although we - * really should try waiting for a running tranasction to finish - * instead of this; or better, we should have a transaction - * throttler prevent us from entering this situation. - */ - osi_assertx(buf_freeListEndp != NULL, "buf_GetNewLocked: no free buffers"); + cm_buf_t *bp; /* buffer we're dealing with */ + cm_buf_t *nextBp; /* next buffer in file hash chain */ + long i; /* temp */ + cm_req_t req; - /* look at all buffers in free list, some of which may temp. - * have high refcounts and which then should be skipped, - * starting cleaning I/O for those which are dirty. If we find - * a clean buffer, we rehash it, lock it and return it. - */ - for(bp = buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) { - /* check to see if it really has zero ref count. This - * code can bump refcounts, at least, so it may not be - * zero. - */ - if (bp->refCount > 0) continue; - - /* we don't have to lock buffer itself, since the ref - * count is 0 and we know it will stay zero as long as - * we hold the global lock. - */ - - /* don't recycle someone in our own chunk */ - if (!cm_FidCmp(&bp->fid, &scp->fid) - && (bp->offset.LowPart & (-cm_chunkSize)) - == (offsetp->LowPart & (-cm_chunkSize))) - continue; - - /* if this page is being filled (!) or cleaned, see if - * the I/O has completed. If not, skip it, otherwise - * do the final processing for the I/O. - */ - if (bp->flags & (CM_BUF_READING | CM_BUF_WRITING)) { - /* probably shouldn't do this much work while - * holding the big lock? Watch for contention - * here. - */ - continue; - } - - if (bp->flags & CM_BUF_DIRTY) { - /* if the buffer is dirty, start cleaning it and - * move on to the next buffer. We do this with - * just the lock required to minimize contention - * on the big lock. - */ - bp->refCount++; - lock_ReleaseWrite(&buf_globalLock); - - /* grab required lock and clean; this only - * starts the I/O. By the time we're back, - * it'll still be marked dirty, but it will also - * have the WRITING flag set, so we won't get - * back here. - */ - buf_CleanAsync(bp, &req); - - /* now put it back and go around again */ - buf_Release(bp); - goto retry; - } - - /* if we get here, we know that the buffer still has a 0 - * ref count, and that it is clean and has no currently - * pending I/O. This is the dude to return. - * Remember that as long as the ref count is 0, we know - * that we won't have any lock conflicts, so we can grab - * the buffer lock out of order in the locking hierarchy. - */ - buf_Recycle(bp); - - /* clean up junk flags */ - bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR); - bp->dataVersion = -1; /* unknown so far */ - - /* now hash in as our new buffer, and give it the - * appropriate label, if requested. 
- */ - if (scp) { - bp->flags |= CM_BUF_INHASH; - bp->fid = scp->fid; - bp->offset = *offsetp; - i = BUF_HASH(&scp->fid, offsetp); - bp->hashp = buf_hashTablepp[i]; - buf_hashTablepp[i] = bp; - i = BUF_FILEHASH(&scp->fid); - nextBp = buf_fileHashTablepp[i]; - bp->fileHashp = nextBp; - bp->fileHashBackp = NULL; - if (nextBp) - nextBp->fileHashBackp = bp; - buf_fileHashTablepp[i] = bp; - } + cm_InitReq(&req); /* just in case */ + + while(1) { + retry: + lock_ObtainWrite(&buf_globalLock); + /* check to see if we lost the race */ + if (scp) { + if (bp = buf_LockedFind(scp, offsetp)) { + bp->refCount--; + lock_ReleaseWrite(&buf_globalLock); + return CM_BUF_EXISTS; + } + } + + /* for debugging, assert free list isn't empty, although we + * really should try waiting for a running tranasction to finish + * instead of this; or better, we should have a transaction + * throttler prevent us from entering this situation. + */ + osi_assertx(buf_freeListEndp != NULL, "buf_GetNewLocked: no free buffers"); + + /* look at all buffers in free list, some of which may temp. + * have high refcounts and which then should be skipped, + * starting cleaning I/O for those which are dirty. If we find + * a clean buffer, we rehash it, lock it and return it. + */ + for(bp = buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) { + /* check to see if it really has zero ref count. This + * code can bump refcounts, at least, so it may not be + * zero. + */ + if (bp->refCount > 0) + continue; - /* prepare to return it. Start by giving it a good - * refcount */ - bp->refCount = 1; + /* we don't have to lock buffer itself, since the ref + * count is 0 and we know it will stay zero as long as + * we hold the global lock. + */ + + /* don't recycle someone in our own chunk */ + if (!cm_FidCmp(&bp->fid, &scp->fid) + && (bp->offset.LowPart & (-cm_chunkSize)) + == (offsetp->LowPart & (-cm_chunkSize))) + continue; + + /* if this page is being filled (!) or cleaned, see if + * the I/O has completed. If not, skip it, otherwise + * do the final processing for the I/O. + */ + if (bp->flags & (CM_BUF_READING | CM_BUF_WRITING)) { + /* probably shouldn't do this much work while + * holding the big lock? Watch for contention + * here. + */ + continue; + } - /* and since it has a non-zero ref count, we should move - * it from the lru queue. It better be still there, - * since we've held the global (big) lock since we found - * it there. - */ - osi_assertx(bp->flags & CM_BUF_INLRU, - "buf_GetNewLocked: LRU screwup"); - if (buf_freeListEndp == bp) { - /* we're the last guy in this queue, so maintain it */ - buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q); - } - osi_QRemove((osi_queue_t **) &buf_freeListp, &bp->q); - bp->flags &= ~CM_BUF_INLRU; + if (bp->flags & CM_BUF_DIRTY) { + /* if the buffer is dirty, start cleaning it and + * move on to the next buffer. We do this with + * just the lock required to minimize contention + * on the big lock. + */ + bp->refCount++; + lock_ReleaseWrite(&buf_globalLock); + + /* grab required lock and clean; this only + * starts the I/O. By the time we're back, + * it'll still be marked dirty, but it will also + * have the WRITING flag set, so we won't get + * back here. + */ + buf_CleanAsync(bp, &req); + + /* now put it back and go around again */ + buf_Release(bp); + goto retry; + } + + /* if we get here, we know that the buffer still has a 0 + * ref count, and that it is clean and has no currently + * pending I/O. This is the dude to return. 
+ * Remember that as long as the ref count is 0, we know + * that we won't have any lock conflicts, so we can grab + * the buffer lock out of order in the locking hierarchy. + */ + buf_Recycle(bp); + + /* clean up junk flags */ + bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR); + bp->dataVersion = -1; /* unknown so far */ + + /* now hash in as our new buffer, and give it the + * appropriate label, if requested. + */ + if (scp) { + bp->flags |= CM_BUF_INHASH; + bp->fid = scp->fid; + bp->offset = *offsetp; + i = BUF_HASH(&scp->fid, offsetp); + bp->hashp = buf_hashTablepp[i]; + buf_hashTablepp[i] = bp; + i = BUF_FILEHASH(&scp->fid); + nextBp = buf_fileHashTablepp[i]; + bp->fileHashp = nextBp; + bp->fileHashBackp = NULL; + if (nextBp) + nextBp->fileHashBackp = bp; + buf_fileHashTablepp[i] = bp; + } + + /* prepare to return it. Start by giving it a good + * refcount */ + bp->refCount = 1; - /* finally, grab the mutex so that people don't use it - * before the caller fills it with data. Again, no one - * should have been able to get to this dude to lock it. - */ - osi_assertx(lock_TryMutex(&bp->mx), - "buf_GetNewLocked: TryMutex failed"); - - lock_ReleaseWrite(&buf_globalLock); - *bufpp = bp; - return 0; - } /* for all buffers in lru queue */ - lock_ReleaseWrite(&buf_globalLock); - } /* while loop over everything */ - /* not reached */ + /* and since it has a non-zero ref count, we should move + * it from the lru queue. It better be still there, + * since we've held the global (big) lock since we found + * it there. + */ + osi_assertx(bp->flags & CM_BUF_INLRU, + "buf_GetNewLocked: LRU screwup"); + if (buf_freeListEndp == bp) { + /* we're the last guy in this queue, so maintain it */ + buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q); + } + osi_QRemove((osi_queue_t **) &buf_freeListp, &bp->q); + bp->flags &= ~CM_BUF_INLRU; + + /* finally, grab the mutex so that people don't use it + * before the caller fills it with data. Again, no one + * should have been able to get to this dude to lock it. + */ + osi_assertx(lock_TryMutex(&bp->mx), + "buf_GetNewLocked: TryMutex failed"); + + lock_ReleaseWrite(&buf_globalLock); + *bufpp = bp; + return 0; + } /* for all buffers in lru queue */ + lock_ReleaseWrite(&buf_globalLock); + } /* while loop over everything */ + /* not reached */ } /* the proc */ /* get a page, returning it held but unlocked. Doesn't fill in the page @@ -852,185 +894,189 @@ retry: */ long buf_GetNew(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp) { - cm_buf_t *bp; - long code; - osi_hyper_t pageOffset; - int created; - - created = 0; - pageOffset.HighPart = offsetp->HighPart; - pageOffset.LowPart = offsetp->LowPart & ~(buf_bufferSize-1); - while (1) { - lock_ObtainWrite(&buf_globalLock); - bp = buf_LockedFind(scp, &pageOffset); - lock_ReleaseWrite(&buf_globalLock); - if (bp) { - /* lock it and break out */ - lock_ObtainMutex(&bp->mx); - break; - } - - /* otherwise, we have to create a page */ - code = buf_GetNewLocked(scp, &pageOffset, &bp); + cm_buf_t *bp; + long code; + osi_hyper_t pageOffset; + int created; + + created = 0; + pageOffset.HighPart = offsetp->HighPart; + pageOffset.LowPart = offsetp->LowPart & ~(buf_bufferSize-1); + while (1) { + lock_ObtainWrite(&buf_globalLock); + bp = buf_LockedFind(scp, &pageOffset); + lock_ReleaseWrite(&buf_globalLock); + if (bp) { + /* lock it and break out */ + lock_ObtainMutex(&bp->mx); + break; + } - /* check if the buffer was created in a race condition branch. - * If so, go around so we can hold a reference to it. 
- */ - if (code == CM_BUF_EXISTS) continue; - - /* something else went wrong */ - if (code != 0) return code; - - /* otherwise, we have a locked buffer that we just created */ - created = 1; - break; - } /* big while loop */ - - /* wait for reads */ - if (bp->flags & CM_BUF_READING) - buf_WaitIO(bp); + /* otherwise, we have to create a page */ + code = buf_GetNewLocked(scp, &pageOffset, &bp); - /* once it has been read once, we can unlock it and return it, still - * with its refcount held. + /* check if the buffer was created in a race condition branch. + * If so, go around so we can hold a reference to it. */ - lock_ReleaseMutex(&bp->mx); - *bufpp = bp; - osi_Log3(buf_logp, "buf_GetNew returning bp 0x%x for file 0x%x, offset 0x%x", - bp, (long) scp, offsetp->LowPart); - return 0; + if (code == CM_BUF_EXISTS) + continue; + + /* something else went wrong */ + if (code != 0) + return code; + + /* otherwise, we have a locked buffer that we just created */ + created = 1; + break; + } /* big while loop */ + + /* wait for reads */ + if (bp->flags & CM_BUF_READING) + buf_WaitIO(bp); + + /* once it has been read once, we can unlock it and return it, still + * with its refcount held. + */ + lock_ReleaseMutex(&bp->mx); + *bufpp = bp; + osi_Log3(buf_logp, "buf_GetNew returning bp 0x%x for file 0x%x, offset 0x%x", + bp, (long) scp, offsetp->LowPart); + return 0; } /* get a page, returning it held but unlocked. Make sure it is complete */ long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp) { - cm_buf_t *bp; - long code; - osi_hyper_t pageOffset; - unsigned long tcount; - int created; + cm_buf_t *bp; + long code; + osi_hyper_t pageOffset; + unsigned long tcount; + int created; #ifdef DISKCACHE95 - cm_diskcache_t *dcp; + cm_diskcache_t *dcp; #endif /* DISKCACHE95 */ - created = 0; - pageOffset.HighPart = offsetp->HighPart; - pageOffset.LowPart = offsetp->LowPart & ~(buf_bufferSize-1); - while (1) { - lock_ObtainWrite(&buf_globalLock); - bp = buf_LockedFind(scp, &pageOffset); - lock_ReleaseWrite(&buf_globalLock); - if (bp) { - /* lock it and break out */ - lock_ObtainMutex(&bp->mx); - break; + created = 0; + pageOffset.HighPart = offsetp->HighPart; + pageOffset.LowPart = offsetp->LowPart & ~(buf_bufferSize-1); + while (1) { + lock_ObtainWrite(&buf_globalLock); + bp = buf_LockedFind(scp, &pageOffset); + lock_ReleaseWrite(&buf_globalLock); + if (bp) { + /* lock it and break out */ + lock_ObtainMutex(&bp->mx); + break; #ifdef DISKCACHE95 - /* touch disk chunk to update LRU info */ - diskcache_Touch(bp->dcp); + /* touch disk chunk to update LRU info */ + diskcache_Touch(bp->dcp); #endif /* DISKCACHE95 */ - } - - /* otherwise, we have to create a page */ - code = buf_GetNewLocked(scp, &pageOffset, &bp); + } - /* check if the buffer was created in a race condition branch. - * If so, go around so we can hold a reference to it. - */ - if (code == CM_BUF_EXISTS) continue; - - /* something else went wrong */ - if (code != 0) return code; - - /* otherwise, we have a locked buffer that we just created */ - created = 1; - break; - } /* big while loop */ - - /* if we get here, we have a locked buffer that may have just been - * created, in which case it needs to be filled with data. + /* otherwise, we have to create a page */ + code = buf_GetNewLocked(scp, &pageOffset, &bp); + + /* check if the buffer was created in a race condition branch. + * If so, go around so we can hold a reference to it. 
*/ - if (created) { - /* load the page; freshly created pages should be idle */ - osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING))); + if (code == CM_BUF_EXISTS) + continue; - /* setup offset, event */ + /* something else went wrong */ + if (code != 0) + return code; + + /* otherwise, we have a locked buffer that we just created */ + created = 1; + break; + } /* big while loop */ + + /* if we get here, we have a locked buffer that may have just been + * created, in which case it needs to be filled with data. + */ + if (created) { + /* load the page; freshly created pages should be idle */ + osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING))); + + /* setup offset, event */ #ifndef DJGPP /* doesn't seem to be used */ - bp->over.Offset = bp->offset.LowPart; - bp->over.OffsetHigh = bp->offset.HighPart; + bp->over.Offset = bp->offset.LowPart; + bp->over.OffsetHigh = bp->offset.HighPart; #endif /* !DJGPP */ - /* start the I/O; may drop lock */ - bp->flags |= CM_BUF_READING; - code = (*cm_buf_opsp->Readp)(bp, buf_bufferSize, &tcount, NULL); + /* start the I/O; may drop lock */ + bp->flags |= CM_BUF_READING; + code = (*cm_buf_opsp->Readp)(bp, buf_bufferSize, &tcount, NULL); #ifdef DISKCACHE95 - code = diskcache_Get(&bp->fid, &bp->offset, bp->datap, buf_bufferSize, &bp->dataVersion, &tcount, &dcp); - bp->dcp = dcp; /* pointer to disk cache struct. */ + code = diskcache_Get(&bp->fid, &bp->offset, bp->datap, buf_bufferSize, &bp->dataVersion, &tcount, &dcp); + bp->dcp = dcp; /* pointer to disk cache struct. */ #endif /* DISKCACHE95 */ - if (code != 0) { - /* failure or queued */ + if (code != 0) { + /* failure or queued */ #ifndef DJGPP /* cm_bufRead always returns 0 */ - if (code != ERROR_IO_PENDING) { + if (code != ERROR_IO_PENDING) { #endif - bp->error = code; - bp->flags |= CM_BUF_ERROR; - bp->flags &= ~CM_BUF_READING; - if (bp->flags & CM_BUF_WAITING) { - bp->flags &= ~CM_BUF_WAITING; - osi_Wakeup((long) bp); - } - lock_ReleaseMutex(&bp->mx); - buf_Release(bp); - return code; + bp->error = code; + bp->flags |= CM_BUF_ERROR; + bp->flags &= ~CM_BUF_READING; + if (bp->flags & CM_BUF_WAITING) { + bp->flags &= ~CM_BUF_WAITING; + osi_Wakeup((long) bp); + } + lock_ReleaseMutex(&bp->mx); + buf_Release(bp); + return code; #ifndef DJGPP - } + } #endif - } else { - /* otherwise, I/O completed instantly and we're done, except - * for padding the xfr out with 0s and checking for EOF - */ - if (tcount < (unsigned long) buf_bufferSize) { - memset(bp->datap+tcount, 0, buf_bufferSize - tcount); - if (tcount == 0) - bp->flags |= CM_BUF_EOF; - } - bp->flags &= ~CM_BUF_READING; - if (bp->flags & CM_BUF_WAITING) { - bp->flags &= ~CM_BUF_WAITING; - osi_Wakeup((long) bp); - } - } - - } /* if created */ - - /* wait for reads, either that which we started above, or that someone - * else started. We don't care if we return a buffer being cleaned. - */ - if (bp->flags & CM_BUF_READING) - buf_WaitIO(bp); + } else { + /* otherwise, I/O completed instantly and we're done, except + * for padding the xfr out with 0s and checking for EOF + */ + if (tcount < (unsigned long) buf_bufferSize) { + memset(bp->datap+tcount, 0, buf_bufferSize - tcount); + if (tcount == 0) + bp->flags |= CM_BUF_EOF; + } + bp->flags &= ~CM_BUF_READING; + if (bp->flags & CM_BUF_WAITING) { + bp->flags &= ~CM_BUF_WAITING; + osi_Wakeup((long) bp); + } + } - /* once it has been read once, we can unlock it and return it, still - * with its refcount held. 
- */ - lock_ReleaseMutex(&bp->mx); - *bufpp = bp; + } /* if created */ - /* now remove from queue; will be put in at the head (farthest from - * being recycled) when we're done in buf_Release. - */ - lock_ObtainWrite(&buf_globalLock); - if (bp->flags & CM_BUF_INLRU) { - if (buf_freeListEndp == bp) - buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q); - osi_QRemove((osi_queue_t **) &buf_freeListp, &bp->q); - bp->flags &= ~CM_BUF_INLRU; - } - lock_ReleaseWrite(&buf_globalLock); + /* wait for reads, either that which we started above, or that someone + * else started. We don't care if we return a buffer being cleaned. + */ + if (bp->flags & CM_BUF_READING) + buf_WaitIO(bp); - osi_Log3(buf_logp, "buf_Get returning bp 0x%x for file 0x%x, offset 0x%x", - bp, (long) scp, offsetp->LowPart); - return 0; + /* once it has been read once, we can unlock it and return it, still + * with its refcount held. + */ + lock_ReleaseMutex(&bp->mx); + *bufpp = bp; + + /* now remove from queue; will be put in at the head (farthest from + * being recycled) when we're done in buf_Release. + */ + lock_ObtainWrite(&buf_globalLock); + if (bp->flags & CM_BUF_INLRU) { + if (buf_freeListEndp == bp) + buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q); + osi_QRemove((osi_queue_t **) &buf_freeListp, &bp->q); + bp->flags &= ~CM_BUF_INLRU; + } + lock_ReleaseWrite(&buf_globalLock); + + osi_Log3(buf_logp, "buf_Get returning bp 0x%x for file 0x%x, offset 0x%x", + bp, (long) scp, offsetp->LowPart); + return 0; } /* count # of elements in the free list; @@ -1040,40 +1086,40 @@ long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp) */ long buf_CountFreeList(void) { - long count; - cm_buf_t *bufp; - - count = 0; - lock_ObtainRead(&buf_globalLock); - for(bufp = buf_freeListp; bufp; bufp = (cm_buf_t *) osi_QNext(&bufp->q)) { - /* if the buffer doesn't have an identity, or if the buffer - * has been invalidate (by having its DV stomped upon), then - * count it as free, since it isn't really being utilized. - */ - if (!(bufp->flags & CM_BUF_INHASH) || bufp->dataVersion <= 0) - count++; - } - lock_ReleaseRead(&buf_globalLock); - return count; + long count; + cm_buf_t *bufp; + + count = 0; + lock_ObtainRead(&buf_globalLock); + for(bufp = buf_freeListp; bufp; bufp = (cm_buf_t *) osi_QNext(&bufp->q)) { + /* if the buffer doesn't have an identity, or if the buffer + * has been invalidate (by having its DV stomped upon), then + * count it as free, since it isn't really being utilized. + */ + if (!(bufp->flags & CM_BUF_INHASH) || bufp->dataVersion <= 0) + count++; + } + lock_ReleaseRead(&buf_globalLock); + return count; } /* clean a buffer synchronously */ void buf_CleanAsync(cm_buf_t *bp, cm_req_t *reqp) { - lock_ObtainMutex(&bp->mx); - buf_LockedCleanAsync(bp, reqp); - lock_ReleaseMutex(&bp->mx); -} + lock_ObtainMutex(&bp->mx); + buf_LockedCleanAsync(bp, reqp); + lock_ReleaseMutex(&bp->mx); +} /* wait for a buffer's cleaning to finish */ void buf_CleanWait(cm_buf_t *bp) { - lock_ObtainMutex(&bp->mx); - if (bp->flags & CM_BUF_WRITING) { - buf_WaitIO(bp); - } - lock_ReleaseMutex(&bp->mx); -} + lock_ObtainMutex(&bp->mx); + if (bp->flags & CM_BUF_WRITING) { + buf_WaitIO(bp); + } + lock_ReleaseMutex(&bp->mx); +} /* set the dirty flag on a buffer, and set associated write-ahead log, * if there is one. Allow one to be added to a buffer, but not changed. 
@@ -1082,15 +1128,15 @@ void buf_CleanWait(cm_buf_t *bp) */ void buf_SetDirty(cm_buf_t *bp) { - osi_assert(bp->refCount > 0); + osi_assert(bp->refCount > 0); - osi_Log1(buf_logp, "buf_SetDirty 0x%x", bp); + osi_Log1(buf_logp, "buf_SetDirty 0x%x", bp); - /* set dirty bit */ - bp->flags |= CM_BUF_DIRTY; + /* set dirty bit */ + bp->flags |= CM_BUF_DIRTY; - /* and turn off EOF flag, since it has associated data now */ - bp->flags &= ~CM_BUF_EOF; + /* and turn off EOF flag, since it has associated data now */ + bp->flags &= ~CM_BUF_EOF; } /* clean all buffers, reset log pointers and invalidate all buffers. @@ -1115,84 +1161,84 @@ void buf_SetDirty(cm_buf_t *bp) */ long buf_CleanAndReset(void) { - long i; - cm_buf_t *bp; - cm_req_t req; - - lock_ObtainWrite(&buf_globalLock); - for(i=0; ihashp) { - bp->refCount++; - lock_ReleaseWrite(&buf_globalLock); - - /* now no locks are held; clean buffer and go on */ - cm_InitReq(&req); - buf_CleanAsync(bp, &req); - buf_CleanWait(bp); - - /* relock and release buffer */ - lock_ObtainWrite(&buf_globalLock); - buf_LockedRelease(bp); - } /* over one bucket */ - } /* for loop over all hash buckets */ - - /* release locks */ - lock_ReleaseWrite(&buf_globalLock); + long i; + cm_buf_t *bp; + cm_req_t req; - /* and we're done */ - return 0; -} + lock_ObtainWrite(&buf_globalLock); + for(i=0; ihashp) { + bp->refCount++; + lock_ReleaseWrite(&buf_globalLock); + + /* now no locks are held; clean buffer and go on */ + cm_InitReq(&req); + buf_CleanAsync(bp, &req); + buf_CleanWait(bp); + + /* relock and release buffer */ + lock_ObtainWrite(&buf_globalLock); + buf_LockedRelease(bp); + } /* over one bucket */ + } /* for loop over all hash buckets */ + + /* release locks */ + lock_ReleaseWrite(&buf_globalLock); + + /* and we're done */ + return 0; +} /* called without global lock being held, reserves buffers for callers * that need more than one held (not locked) at once. */ void buf_ReserveBuffers(long nbuffers) { - lock_ObtainWrite(&buf_globalLock); - while (1) { - if (buf_reservedBufs + nbuffers > buf_maxReservedBufs) { - buf_reserveWaiting = 1; - osi_Log1(buf_logp, "buf_ReserveBuffers waiting for %d bufs", nbuffers); - osi_SleepW((long) &buf_reservedBufs, &buf_globalLock); - lock_ObtainWrite(&buf_globalLock); - } - else { - buf_reservedBufs += nbuffers; - break; - } + lock_ObtainWrite(&buf_globalLock); + while (1) { + if (buf_reservedBufs + nbuffers > buf_maxReservedBufs) { + buf_reserveWaiting = 1; + osi_Log1(buf_logp, "buf_ReserveBuffers waiting for %d bufs", nbuffers); + osi_SleepW((long) &buf_reservedBufs, &buf_globalLock); + lock_ObtainWrite(&buf_globalLock); + } + else { + buf_reservedBufs += nbuffers; + break; } - lock_ReleaseWrite(&buf_globalLock); + } + lock_ReleaseWrite(&buf_globalLock); } int buf_TryReserveBuffers(long nbuffers) { - int code; - - lock_ObtainWrite(&buf_globalLock); - if (buf_reservedBufs + nbuffers > buf_maxReservedBufs) { - code = 0; - } - else { - buf_reservedBufs += nbuffers; - code = 1; - } - lock_ReleaseWrite(&buf_globalLock); - return code; -} + int code; + + lock_ObtainWrite(&buf_globalLock); + if (buf_reservedBufs + nbuffers > buf_maxReservedBufs) { + code = 0; + } + else { + buf_reservedBufs += nbuffers; + code = 1; + } + lock_ReleaseWrite(&buf_globalLock); + return code; +} /* called without global lock held, releases reservation held by * buf_ReserveBuffers. 
*/ void buf_UnreserveBuffers(long nbuffers) { - lock_ObtainWrite(&buf_globalLock); - buf_reservedBufs -= nbuffers; - if (buf_reserveWaiting) { - buf_reserveWaiting = 0; - osi_Wakeup((long) &buf_reservedBufs); - } - lock_ReleaseWrite(&buf_globalLock); -} + lock_ObtainWrite(&buf_globalLock); + buf_reservedBufs -= nbuffers; + if (buf_reserveWaiting) { + buf_reserveWaiting = 0; + osi_Wakeup((long) &buf_reservedBufs); + } + lock_ReleaseWrite(&buf_globalLock); +} /* truncate the buffers past sizep, zeroing out the page, if we don't * end on a page boundary. @@ -1200,220 +1246,222 @@ void buf_UnreserveBuffers(long nbuffers) * Requires cm_bufCreateLock to be write locked. */ long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp, - osi_hyper_t *sizep) + osi_hyper_t *sizep) { - cm_buf_t *bufp; - cm_buf_t *nbufp; /* next buffer, if didRelease */ - osi_hyper_t bufEnd; - long code; - long bufferPos; - int didRelease; - long i; - - /* assert that cm_bufCreateLock is held in write mode */ - lock_AssertWrite(&scp->bufCreateLock); - - i = BUF_FILEHASH(&scp->fid); - - lock_ObtainWrite(&buf_globalLock); - bufp = buf_fileHashTablepp[i]; - if (bufp == NULL) { - lock_ReleaseWrite(&buf_globalLock); - return 0; - } - - bufp->refCount++; - lock_ReleaseWrite(&buf_globalLock); - for(; bufp; bufp = nbufp) { - didRelease = 0; - lock_ObtainMutex(&bufp->mx); - - bufEnd.HighPart = 0; - bufEnd.LowPart = buf_bufferSize; - bufEnd = LargeIntegerAdd(bufEnd, bufp->offset); - - if (cm_FidCmp(&bufp->fid, &scp->fid) == 0 && - LargeIntegerLessThan(*sizep, bufEnd)) { - buf_WaitIO(bufp); - } - lock_ObtainMutex(&scp->mx); + cm_buf_t *bufp; + cm_buf_t *nbufp; /* next buffer, if didRelease */ + osi_hyper_t bufEnd; + long code; + long bufferPos; + int didRelease; + long i; + + /* assert that cm_bufCreateLock is held in write mode */ + lock_AssertWrite(&scp->bufCreateLock); + + i = BUF_FILEHASH(&scp->fid); + + lock_ObtainWrite(&buf_globalLock); + bufp = buf_fileHashTablepp[i]; + if (bufp == NULL) { + lock_ReleaseWrite(&buf_globalLock); + return 0; + } + + bufp->refCount++; + lock_ReleaseWrite(&buf_globalLock); + for(; bufp; bufp = nbufp) { + didRelease = 0; + lock_ObtainMutex(&bufp->mx); + + bufEnd.HighPart = 0; + bufEnd.LowPart = buf_bufferSize; + bufEnd = LargeIntegerAdd(bufEnd, bufp->offset); + + if (cm_FidCmp(&bufp->fid, &scp->fid) == 0 && + LargeIntegerLessThan(*sizep, bufEnd)) { + buf_WaitIO(bufp); + } + lock_ObtainMutex(&scp->mx); - /* make sure we have a callback (so we have the right value for - * the length), and wait for it to be safe to do a truncate. - */ - code = cm_SyncOp(scp, bufp, userp, reqp, 0, - CM_SCACHESYNC_NEEDCALLBACK - | CM_SCACHESYNC_GETSTATUS - | CM_SCACHESYNC_SETSIZE - | CM_SCACHESYNC_BUFLOCKED); - /* if we succeeded in our locking, and this applies to the right - * file, and the truncate request overlaps the buffer either - * totally or partially, then do something. + /* make sure we have a callback (so we have the right value for + * the length), and wait for it to be safe to do a truncate. + */ + code = cm_SyncOp(scp, bufp, userp, reqp, 0, + CM_SCACHESYNC_NEEDCALLBACK + | CM_SCACHESYNC_GETSTATUS + | CM_SCACHESYNC_SETSIZE + | CM_SCACHESYNC_BUFLOCKED); + /* if we succeeded in our locking, and this applies to the right + * file, and the truncate request overlaps the buffer either + * totally or partially, then do something. 
+ */ + if (code == 0 && cm_FidCmp(&bufp->fid, &scp->fid) == 0 + && LargeIntegerLessThan(*sizep, bufEnd)) { + + lock_ObtainWrite(&buf_globalLock); + + /* destroy the buffer, turning off its dirty bit, if + * we're truncating the whole buffer. Otherwise, set + * the dirty bit, and clear out the tail of the buffer + * if we just overlap some. + */ + if (LargeIntegerLessThanOrEqualTo(*sizep, bufp->offset)) { + /* truncating the entire page */ + bufp->flags &= ~CM_BUF_DIRTY; + bufp->dataVersion = -1; /* known bad */ + bufp->dirtyCounter++; + } + else { + /* don't set dirty, since dirty implies + * currently up-to-date. Don't need to do this, + * since we'll update the length anyway. + * + * Zero out remainder of the page, in case we + * seek and write past EOF, and make this data + * visible again. */ - if (code == 0 && cm_FidCmp(&bufp->fid, &scp->fid) == 0 - && LargeIntegerLessThan(*sizep, bufEnd)) { - - lock_ObtainWrite(&buf_globalLock); - - /* destroy the buffer, turning off its dirty bit, if - * we're truncating the whole buffer. Otherwise, set - * the dirty bit, and clear out the tail of the buffer - * if we just overlap some. - */ - if (LargeIntegerLessThanOrEqualTo(*sizep, bufp->offset)) { - /* truncating the entire page */ - bufp->flags &= ~CM_BUF_DIRTY; - bufp->dataVersion = -1; /* known bad */ - bufp->dirtyCounter++; - } - else { - /* don't set dirty, since dirty implies - * currently up-to-date. Don't need to do this, - * since we'll update the length anyway. - * - * Zero out remainder of the page, in case we - * seek and write past EOF, and make this data - * visible again. - */ - bufferPos = sizep->LowPart & (buf_bufferSize - 1); - osi_assert(bufferPos != 0); - memset(bufp->datap + bufferPos, 0, - buf_bufferSize - bufferPos); - } - - lock_ReleaseWrite(&buf_globalLock); + bufferPos = sizep->LowPart & (buf_bufferSize - 1); + osi_assert(bufferPos != 0); + memset(bufp->datap + bufferPos, 0, + buf_bufferSize - bufferPos); + } - } + lock_ReleaseWrite(&buf_globalLock); + } - lock_ReleaseMutex(&scp->mx); - lock_ReleaseMutex(&bufp->mx); - if (!didRelease) { - lock_ObtainWrite(&buf_globalLock); - nbufp = bufp->fileHashp; - if (nbufp) nbufp->refCount++; - buf_LockedRelease(bufp); - lock_ReleaseWrite(&buf_globalLock); - } - - /* bail out early if we fail */ - if (code) { - /* at this point, nbufp is held; bufp has already been - * released. - */ - if (nbufp) buf_Release(nbufp); - return code; - } - } - - /* success */ - return 0; + lock_ReleaseMutex(&scp->mx); + lock_ReleaseMutex(&bufp->mx); + if (!didRelease) { + lock_ObtainWrite(&buf_globalLock); + nbufp = bufp->fileHashp; + if (nbufp) nbufp->refCount++; + buf_LockedRelease(bufp); + lock_ReleaseWrite(&buf_globalLock); + } + + /* bail out early if we fail */ + if (code) { + /* at this point, nbufp is held; bufp has already been + * released. 
+             */
+            if (nbufp)
+                buf_Release(nbufp);
+            return code;
+        }
+    }
+
+    /* success */
+    return 0;
 }

 long buf_FlushCleanPages(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
 {
-        long code;
-        cm_buf_t *bp;                /* buffer we're hacking on */
-        cm_buf_t *nbp;
-        int didRelease;
-        long i;
-
-        i = BUF_FILEHASH(&scp->fid);
-
-        code = 0;
-        lock_ObtainWrite(&buf_globalLock);
-        bp = buf_fileHashTablepp[i];
-        if (bp) bp->refCount++;
-        lock_ReleaseWrite(&buf_globalLock);
-        for(; bp; bp = nbp) {
-                didRelease = 0;        /* haven't released this buffer yet */
-
-                /* clean buffer synchronously */
-                if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
-                        lock_ObtainMutex(&bp->mx);
-
-                        /* start cleaning the buffer, and wait for it to finish */
-                        buf_LockedCleanAsync(bp, reqp);
-                        buf_WaitIO(bp);
-                        lock_ReleaseMutex(&bp->mx);
-
-                        code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
-                        if (code) goto skip;
-
-                        lock_ObtainWrite(&buf_globalLock);
-                        /* actually, we only know that buffer is clean if ref
-                         * count is 1, since we don't have buffer itself locked.
-                         */
-                        if (!(bp->flags & CM_BUF_DIRTY)) {
-                                if (bp->refCount == 1) {        /* bp is held above */
-                                        buf_LockedRelease(bp);
-                                        nbp = bp->fileHashp;
-                                        if (nbp) nbp->refCount++;
-                                        didRelease = 1;
-                                        buf_Recycle(bp);
-                                }
-                        }
-                        lock_ReleaseWrite(&buf_globalLock);
-
-                        (*cm_buf_opsp->Unstabilizep)(scp, userp);
-                }
-
-skip:
-                if (!didRelease) {
-                        lock_ObtainWrite(&buf_globalLock);
-                        if (nbp = bp->fileHashp) nbp->refCount++;
-                        buf_LockedRelease(bp);
-                        lock_ReleaseWrite(&buf_globalLock);
-                }
-        } /* for loop over a bunch of buffers */
-
-        /* done */
-        return code;
-}
+    long code;
+    cm_buf_t *bp;                /* buffer we're hacking on */
+    cm_buf_t *nbp;
+    int didRelease;
+    long i;
+
+    i = BUF_FILEHASH(&scp->fid);
+
+    code = 0;
+    lock_ObtainWrite(&buf_globalLock);
+    bp = buf_fileHashTablepp[i];
+    if (bp) bp->refCount++;
+    lock_ReleaseWrite(&buf_globalLock);
+    for(; bp; bp = nbp) {
+        didRelease = 0;        /* haven't released this buffer yet */
+
+        /* clean buffer synchronously */
+        if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
+            lock_ObtainMutex(&bp->mx);
+
+            /* start cleaning the buffer, and wait for it to finish */
+            buf_LockedCleanAsync(bp, reqp);
+            buf_WaitIO(bp);
+            lock_ReleaseMutex(&bp->mx);
+
+            code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
+            if (code) goto skip;
+
+            lock_ObtainWrite(&buf_globalLock);
+            /* actually, we only know that buffer is clean if ref
+             * count is 1, since we don't have buffer itself locked.
+             */
+            if (!(bp->flags & CM_BUF_DIRTY)) {
+                if (bp->refCount == 1) {        /* bp is held above */
+                    buf_LockedRelease(bp);
+                    nbp = bp->fileHashp;
+                    if (nbp) nbp->refCount++;
+                    didRelease = 1;
+                    buf_Recycle(bp);
+                }
+            }
+            lock_ReleaseWrite(&buf_globalLock);
+
+            (*cm_buf_opsp->Unstabilizep)(scp, userp);
+        }
+
+      skip:
+        if (!didRelease) {
+            lock_ObtainWrite(&buf_globalLock);
+            if (nbp = bp->fileHashp) nbp->refCount++;
+            buf_LockedRelease(bp);
+            lock_ReleaseWrite(&buf_globalLock);
+        }
+    } /* for loop over a bunch of buffers */
+
+    /* done */
+    return code;
+}

 long buf_CleanVnode(struct cm_scache *scp, cm_user_t *userp, cm_req_t *reqp)
 {
-        long code;
-        cm_buf_t *bp;                /* buffer we're hacking on */
+    long code;
+    cm_buf_t *bp;                /* buffer we're hacking on */
     cm_buf_t *nbp;                /* next one */
-        long i;
+    long i;

-        i = BUF_FILEHASH(&scp->fid);
+    i = BUF_FILEHASH(&scp->fid);

-        code = 0;
-        lock_ObtainWrite(&buf_globalLock);
+    code = 0;
+    lock_ObtainWrite(&buf_globalLock);
     bp = buf_fileHashTablepp[i];
     if (bp) bp->refCount++;
     lock_ReleaseWrite(&buf_globalLock);
-        for(; bp; bp = nbp) {
-                /* clean buffer synchronously */
-                if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
-                        if (userp) {
+    for(; bp; bp = nbp) {
+        /* clean buffer synchronously */
+        if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
+            if (userp) {
                 cm_HoldUser(userp);
-                                lock_ObtainMutex(&bp->mx);
-                                if (bp->userp)
+                lock_ObtainMutex(&bp->mx);
+                if (bp->userp)
                     cm_ReleaseUser(bp->userp);
                 bp->userp = userp;
-                                lock_ReleaseMutex(&bp->mx);
-                        }
-                        buf_CleanAsync(bp, reqp);
+                lock_ReleaseMutex(&bp->mx);
+            }
+            buf_CleanAsync(bp, reqp);
             buf_CleanWait(bp);
             lock_ObtainMutex(&bp->mx);
-                        if (bp->flags & CM_BUF_ERROR) {
-                                if (code == 0 || code == -1) code = bp->error;
-                                if (code == 0) code = -1;
+            if (bp->flags & CM_BUF_ERROR) {
+                if (code == 0 || code == -1)
+                    code = bp->error;
+                if (code == 0)
+                    code = -1;
             }
             lock_ReleaseMutex(&bp->mx);
-                }
+        }

-                lock_ObtainWrite(&buf_globalLock);
-                buf_LockedRelease(bp);
+        lock_ObtainWrite(&buf_globalLock);
+        buf_LockedRelease(bp);
         nbp = bp->fileHashp;
         if (nbp) nbp->refCount++;
-                lock_ReleaseWrite(&buf_globalLock);
-        } /* for loop over a bunch of buffers */
-
+        lock_ReleaseWrite(&buf_globalLock);
+    } /* for loop over a bunch of buffers */
+
     /* done */
-        return code;
+    return code;
 }

 /* dump the contents of the buf_hashTablepp.
 */
@@ -1424,22 +1472,21 @@ int cm_DumpBufHashTable(FILE *outputFile, char *cookie)
     char output[1024];
     int i;

-        if (buf_hashTablepp == NULL)
-                return -1;
+    if (buf_hashTablepp == NULL)
+        return -1;

     lock_ObtainRead(&buf_globalLock);

-        sprintf(output, "%s - dumping buf_HashTable - buf_hashSize=%d\n", cookie, buf_hashSize);
+    StringCbPrintfA(output, sizeof(output), "%s - dumping buf_HashTable - buf_hashSize=%d\n", cookie, buf_hashSize);
     WriteFile(outputFile, output, strlen(output), &zilch, NULL);

     for (i = 0; i < buf_hashSize; i++)
     {
-        for(bp = buf_hashTablepp[i]; bp; bp=bp->hashp)
+        for (bp = buf_hashTablepp[i]; bp; bp=bp->hashp)
         {
             if (bp->refCount)
             {
-                sprintf(output, "%s bp=0x%08X, hash=%d, fid (cell=%d, volume=%d,"
-                        "vnode=%d, unique=%d), size=%d refCount=%d\n",
+                StringCbPrintfA(output, sizeof(output), "%s bp=0x%08X, hash=%d, fid (cell=%d, volume=%d," "vnode=%d, unique=%d), size=%d refCount=%d\n",
                  cookie, (void *)bp, i, bp->fid.cell, bp->fid.volume,
                  bp->fid.vnode, bp->fid.unique, bp->size, bp->refCount);
                 WriteFile(outputFile, output, strlen(output), &zilch, NULL);
@@ -1447,10 +1494,31 @@ int cm_DumpBufHashTable(FILE *outputFile, char *cookie)
             }
         }
     }

-        sprintf(output, "%s - Done dumping buf_HashTable.\n", cookie);
+    StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_HashTable.\n", cookie);
     WriteFile(outputFile, output, strlen(output), &zilch, NULL);

     lock_ReleaseRead(&buf_globalLock);
     return 0;
 }

+void buf_ForceTrace(BOOL flush)
+{
+    HANDLE handle;
+    int len;
+    char buf[256];
+
+    if (!buf_logp)
+        return;
+
+    len = GetTempPath(sizeof(buf)-10, buf);
+    StringCbCopyA(&buf[len], sizeof(buf)-len, "/afs-buffer.log");
+    handle = CreateFile(buf, GENERIC_WRITE, FILE_SHARE_READ,
+                        NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
+    if (handle == INVALID_HANDLE_VALUE) {
+        osi_panic("Cannot create log file", __FILE__, __LINE__);
+    }
+    osi_LogPrint(buf_logp, handle);
+    if (flush)
+        FlushFileBuffers(handle);
+    CloseHandle(handle);
+}
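
Note on the truncation arithmetic in buf_Truncate above: the partial-overlap
branch computes the new EOF's offset within its page as
sizep->LowPart & (buf_bufferSize - 1) and zeroes everything from there to the
end of the page, so that stale bytes cannot reappear if a later write seeks
past EOF. The mask only works when the buffer size is a power of two (the
default CM_BUF_SIZE is). The standalone sketch below illustrates just that
arithmetic; BUF_SIZE and truncate_tail are invented stand-ins for
buf_bufferSize and the in-place logic, not part of the cache manager.

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 4096   /* stand-in for buf_bufferSize; must be a power of two */

/* zero the tail of the page containing the new EOF, mirroring the
 * bufferPos/memset step in buf_Truncate's partial-overlap branch */
static void truncate_tail(char *datap, unsigned long newLength)
{
    unsigned long bufferPos = newLength & (BUF_SIZE - 1);

    /* bufferPos == 0 means the page lies entirely past the new EOF;
     * buf_Truncate handles that case by invalidating the buffer instead */
    if (bufferPos != 0)
        memset(datap + bufferPos, 0, BUF_SIZE - bufferPos);
}

int main(void)
{
    char page[BUF_SIZE];

    memset(page, 'x', sizeof(page));
    truncate_tail(page, 100);   /* new EOF falls 100 bytes into this page */
    printf("page[99]=%c page[100]=%d\n", page[99], page[100]);  /* prints x, 0 */
    return 0;
}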
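
buf_FlushCleanPages and buf_CleanVnode walk a file's hash chain with the same
reference discipline: while buf_globalLock is held, take a reference on the
successor (fileHashp) before dropping the reference on the current buffer, so
that neither can be recycled out from under the walker between iterations. The
toy model below shows only that pattern; struct buf, lock_acquire,
lock_release, and process are invented stand-ins for cm_buf_t, the
buf_globalLock calls, and the per-buffer work, not the cache manager's real
API.

#include <stdio.h>

struct buf {
    struct buf *fileHashp;      /* next buffer on this file's hash chain */
    int refCount;               /* nonzero means "held": may not be recycled */
};

static void lock_acquire(void) { /* stands in for lock_ObtainWrite(&buf_globalLock) */ }
static void lock_release(void) { /* stands in for lock_ReleaseWrite(&buf_globalLock) */ }

static void process(struct buf *bp)
{
    /* bp is held here, so it cannot vanish while we work on it */
    printf("processing %p (refCount=%d)\n", (void *)bp, bp->refCount);
}

static void walk_chain(struct buf *head)
{
    struct buf *bp, *nbp;

    lock_acquire();
    bp = head;
    if (bp) bp->refCount++;         /* hold the first element */
    lock_release();

    for (; bp; bp = nbp) {
        process(bp);

        lock_acquire();
        nbp = bp->fileHashp;
        if (nbp) nbp->refCount++;   /* hold the successor first... */
        bp->refCount--;             /* ...then drop the current buffer */
        lock_release();
    }
}

int main(void)
{
    struct buf c = { NULL, 0 }, b = { &c, 0 }, a = { &b, 0 };
    walk_chain(&a);
    return 0;
}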