[openafs.git] / src / WINNT / afsd / cm_buf.c

/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/* Copyright (C) 1994 Cazamar Systems, Inc. */

#include <afs/param.h>
#include <afs/stds.h>

#ifndef DJGPP
#include <windows.h>
#endif
#include <osi.h>
#include <malloc.h>
#include <stdio.h>
#include <assert.h>

#include "afsd.h"

extern void afsi_log(char *pattern, ...);

/* This module implements the buffer package used by the local transaction
 * system (cm).  It is initialized by calling cm_Init, which calls buf_Init;
 * it must be initialized before any of its main routines are called.
 *
 * Each buffer is hashed into a hash table by file ID and offset, and if its
 * reference count is zero, it is also in a free list.
 *
 * There are two locks involved in buffer processing.  The global lock
 * buf_globalLock protects all of the global variables defined in this module,
 * the reference counts and hash pointers in the actual cm_buf_t structures,
 * and the LRU queue pointers in the buffer structures.
 *
 * The mutexes in the buffer structures protect the remaining fields in the
 * buffers, as well as the data itself.
 *
 * The locking hierarchy here is this:
 *
 * - resv multiple simul. buffers reservation
 * - lock buffer I/O flags
 * - lock buffer's mutex
 * - lock buf_globalLock
 */
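
/* Illustrative only (not part of the original source): a caller that needs
 * both a buffer's mutex and buf_globalLock must follow the hierarchy above,
 * taking the buffer's mutex first:
 *
 *      lock_ObtainMutex(&bp->mx);              // buffer's mutex first
 *      lock_ObtainWrite(&buf_globalLock);      // global lock last
 *      ...update refCount, hash, or LRU fields...
 *      lock_ReleaseWrite(&buf_globalLock);
 *      lock_ReleaseMutex(&bp->mx);
 *
 * Acquiring buf_globalLock first and then a buffer mutex can deadlock
 * against threads that obey this ordering.
 */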

/* global debugging log */
osi_log_t *buf_logp = NULL;

/* Global lock protecting hash tables and free lists */
osi_rwlock_t buf_globalLock;

/* ptr to head of the free list (most recently used) and the
 * tail (the guy to remove first).  We use osi_Q* functions
 * to put stuff in buf_freeListp, and maintain the end
 * pointer manually
 */
cm_buf_t *buf_freeListp;
cm_buf_t *buf_freeListEndp;

/* a pointer to a list of all buffers, just so that we can find them
 * easily for debugging, and for the incr syncer.  Locked under
 * the global lock.
 */
cm_buf_t *buf_allp;

/* default settings; these variables may be assigned before calling
 * cm_Init, as a way of changing these defaults.
 */
long buf_nbuffers = CM_BUF_BUFFERS;
long buf_nOrigBuffers;
long buf_bufferSize = CM_BUF_SIZE;
long buf_hashSize = CM_BUF_HASHSIZE;
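
/* Illustrative only (values are hypothetical): a client wanting a larger
 * cache can override the defaults before initialization, e.g.:
 *
 *      buf_nbuffers = 5000;    // more cache buffers than CM_BUF_BUFFERS
 *      buf_hashSize = 2048;    // buf_Init lowers this to a nearby prime
 *      cm_Init(...);           // cm_Init calls buf_Init with the new values
 */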

#ifndef DJGPP
static
HANDLE CacheHandle;

static
SYSTEM_INFO sysInfo;
#endif /* !DJGPP */

/* buffer reservation variables */
long buf_reservedBufs;
long buf_maxReservedBufs;
int buf_reserveWaiting;

/* callouts for reading and writing data, etc */
cm_buf_ops_t *cm_buf_opsp;

/* pointer to hash table; size computed dynamically */
cm_buf_t **buf_hashTablepp;

/* second hash table, chaining each file's buffers by file ID */
cm_buf_t **buf_fileHashTablepp;

#ifdef DISKCACHE95
/* for experimental disk caching support in Win95 client */
cm_buf_t *buf_diskFreeListp;
cm_buf_t *buf_diskFreeListEndp;
cm_buf_t *buf_diskAllp;
extern int cm_diskCacheEnabled;
#endif /* DISKCACHE95 */

/* hold a reference to an already held buffer */
void buf_Hold(cm_buf_t *bp)
{
        lock_ObtainWrite(&buf_globalLock);
        bp->refCount++;
        lock_ReleaseWrite(&buf_globalLock);
}
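
/* Illustrative only: buf_Hold/buf_Release bracket any stretch where a
 * caller keeps a buffer pointer while no lock protects it, e.g.:
 *
 *      buf_Hold(bp);           // refCount pins bp's identity
 *      ...use bp; may sleep or drop every lock...
 *      buf_Release(bp);        // may put bp back on the free list
 */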

/* incremental sync daemon.  Writes 1/10th of all the buffers every 5000 ms */
void buf_IncrSyncer(long parm)
{
        cm_buf_t *bp;                   /* buffer we're hacking on; held */
        long i;                         /* counter */
        long nAtOnce;                   /* how many to do at once */
        cm_req_t req;

        lock_ObtainWrite(&buf_globalLock);
        bp = buf_allp;
        bp->refCount++;
        lock_ReleaseWrite(&buf_globalLock);
        nAtOnce = buf_nbuffers / 10;
        while (1) {
#ifndef DJGPP
                i = SleepEx(5000, 1);
                if (i != 0) continue;
#else
                thrd_Sleep(5000);
#endif /* DJGPP */

                /* now go through our percentage of the buffers */
                for(i=0; i<nAtOnce; i++) {
                        /* don't want its identity changing while we're
                         * messing with it, so must do all of this with
                         * bp held.
                         */

                        /* start cleaning the buffer; don't touch log pages since
                         * the log code counts on knowing exactly who is writing
                         * a log page at any given instant.
                         */
                        cm_InitReq(&req);
                        req.flags |= CM_REQ_NORETRY;
                        buf_CleanAsync(bp, &req);

                        /* now advance to the next buffer; the allp chain never changes,
                         * and so can be followed even when holding no locks.
                         */
                        lock_ObtainWrite(&buf_globalLock);
                        buf_LockedRelease(bp);
                        bp = bp->allp;
                        if (!bp) bp = buf_allp;
                        bp->refCount++;
                        lock_ReleaseWrite(&buf_globalLock);
                }       /* for loop over a bunch of buffers */
        }               /* whole daemon's while loop */
}

#ifndef DJGPP
/* Create a security attribute structure suitable for use when the cache file
 * is created.  What we mainly want is that only the administrator should be
 * able to do anything with the file.  We create an ACL with only one entry,
 * an entry that grants all rights to the administrator.
 */
PSECURITY_ATTRIBUTES CreateCacheFileSA()
{
        PSECURITY_ATTRIBUTES psa;
        PSECURITY_DESCRIPTOR psd;
        SID_IDENTIFIER_AUTHORITY authority = SECURITY_NT_AUTHORITY;
        PSID AdminSID;
        DWORD AdminSIDlength;
        PACL AdminOnlyACL;
        DWORD ACLlength;

        /* Get Administrator SID */
        AllocateAndInitializeSid(&authority, 2,
                                 SECURITY_BUILTIN_DOMAIN_RID,
                                 DOMAIN_ALIAS_RID_ADMINS,
                                 0, 0, 0, 0, 0, 0,
                                 &AdminSID);

        /* Create Administrator-only ACL */
        AdminSIDlength = GetLengthSid(AdminSID);
        ACLlength = sizeof(ACL) + sizeof(ACCESS_ALLOWED_ACE)
                        + AdminSIDlength - sizeof(DWORD);
        AdminOnlyACL = GlobalAlloc(GMEM_FIXED, ACLlength);
        InitializeAcl(AdminOnlyACL, ACLlength, ACL_REVISION);
        AddAccessAllowedAce(AdminOnlyACL, ACL_REVISION,
                            STANDARD_RIGHTS_ALL | SPECIFIC_RIGHTS_ALL,
                            AdminSID);

        /* Create security descriptor */
        psd = GlobalAlloc(GMEM_FIXED, sizeof(SECURITY_DESCRIPTOR));
        InitializeSecurityDescriptor(psd, SECURITY_DESCRIPTOR_REVISION);
        SetSecurityDescriptorDacl(psd, TRUE, AdminOnlyACL, FALSE);

        /* Create security attributes structure */
        psa = GlobalAlloc(GMEM_FIXED, sizeof(SECURITY_ATTRIBUTES));
        psa->nLength = sizeof(SECURITY_ATTRIBUTES);
        psa->lpSecurityDescriptor = psd;
        psa->bInheritHandle = TRUE;

        return psa;
}
#endif /* !DJGPP */

#ifndef DJGPP
/* Free a security attribute structure created by CreateCacheFileSA() */
VOID FreeCacheFileSA(PSECURITY_ATTRIBUTES psa)
{
        BOOL b1, b2;
        PACL pAcl;

        GetSecurityDescriptorDacl(psa->lpSecurityDescriptor, &b1, &pAcl, &b2);
        GlobalFree(pAcl);
        GlobalFree(psa->lpSecurityDescriptor);
        GlobalFree(psa);
}
#endif /* !DJGPP */

/* initialize the buffer package; called with no locks
 * held during the initialization phase.
 */
long buf_Init(cm_buf_ops_t *opsp)
{
        static osi_once_t once;
        cm_buf_t *bp;
        long sectorSize;
        thread_t phandle;
#ifndef DJGPP
        HANDLE hf, hm;
        PSECURITY_ATTRIBUTES psa;
#endif /* !DJGPP */
        long i;
        unsigned long pid;
        char *data;
        long cs;

#ifndef DJGPP
        /* Get system info; all we really want is the allocation granularity */
        GetSystemInfo(&sysInfo);
#endif /* !DJGPP */

        /* Have to be able to reserve a whole chunk */
        if (((buf_nbuffers - 3) * buf_bufferSize) < cm_chunkSize)
                return CM_ERROR_TOOFEWBUFS;

        /* remember the callouts for reading and writing data */
        cm_buf_opsp = opsp;

        if (osi_Once(&once)) {
                /* initialize global locks */
                lock_InitializeRWLock(&buf_globalLock, "Global buffer lock");

#ifndef DJGPP
                /*
                 * Cache file mapping constrained by
                 * system allocation granularity;
                 * round up, assuming granularity is a power of two
                 */
                cs = buf_nbuffers * buf_bufferSize;
                cs = (cs + (sysInfo.dwAllocationGranularity - 1))
                        & ~(sysInfo.dwAllocationGranularity - 1);
                if (cs != buf_nbuffers * buf_bufferSize) {
                        buf_nbuffers = cs / buf_bufferSize;
                        afsi_log("Cache size rounded up to %d buffers",
                                 buf_nbuffers);
                }
#endif /* !DJGPP */

                /* remember this for those who want to reset it */
                buf_nOrigBuffers = buf_nbuffers;

                /* lower hash size to a prime number */
                buf_hashSize = osi_PrimeLessThan(buf_hashSize);

                /* create hash table */
                buf_hashTablepp = malloc(buf_hashSize * sizeof(cm_buf_t *));
                memset((void *)buf_hashTablepp, 0,
                        buf_hashSize * sizeof(cm_buf_t *));

                /* second hash table, chained by file ID */
                buf_fileHashTablepp = malloc(buf_hashSize * sizeof(cm_buf_t *));
                memset((void *)buf_fileHashTablepp, 0,
                        buf_hashSize * sizeof(cm_buf_t *));

                /* min value for which this works */
                sectorSize = 1;

#ifndef DJGPP
                /* Reserve buffer space by mapping cache file */
                psa = CreateCacheFileSA();
                hf = CreateFile(cm_CachePath,
                        GENERIC_READ | GENERIC_WRITE,
                        FILE_SHARE_READ | FILE_SHARE_WRITE,
                        psa,
                        OPEN_ALWAYS,
                        FILE_ATTRIBUTE_NORMAL,
                        NULL);
                if (hf == INVALID_HANDLE_VALUE) {
                        afsi_log("create file error %d", GetLastError());
                        return CM_ERROR_INVAL;
                }
                FreeCacheFileSA(psa);
                CacheHandle = hf;
                hm = CreateFileMapping(hf,
                        NULL,
                        PAGE_READWRITE,
                        0, buf_nbuffers * buf_bufferSize,
                        NULL);
                if (hm == NULL) {
                        if (GetLastError() == ERROR_DISK_FULL) {
                                afsi_log("Error creating cache file mapping: disk full");
                                CloseHandle(hf);        /* don't leak the cache file handle */
                                return CM_ERROR_TOOMANYBUFS;
                        }
                        CloseHandle(hf);                /* don't leak the cache file handle */
                        return CM_ERROR_INVAL;
                }
                data = MapViewOfFile(hm,
                        FILE_MAP_ALL_ACCESS,
                        0, 0,
                        buf_nbuffers * buf_bufferSize);
                if (data == NULL) {
                        CloseHandle(hf);
                        CloseHandle(hm);
                        return CM_ERROR_INVAL;
                }
                CloseHandle(hm);
#else
                /* djgpp doesn't support memory mapped files */
                data = malloc(buf_nbuffers * buf_bufferSize);
#endif /* !DJGPP */

                /* create buffer headers and put in free list */
                bp = malloc(buf_nbuffers * sizeof(cm_buf_t));
                buf_allp = NULL;
                for(i=0; i<buf_nbuffers; i++) {
                        /* allocate and zero some storage */
                        memset(bp, 0, sizeof(cm_buf_t));

                        /* thread on list of all buffers */
                        bp->allp = buf_allp;
                        buf_allp = bp;

                        osi_QAdd((osi_queue_t **)&buf_freeListp, &bp->q);
                        bp->flags |= CM_BUF_INLRU;
                        lock_InitializeMutex(&bp->mx, "Buffer mutex");

                        /* grab appropriate number of bytes from aligned zone */
                        bp->datap = data;

                        /* setup last buffer pointer */
                        if (i == 0)
                                buf_freeListEndp = bp;

                        /* next */
                        bp++;
                        data += buf_bufferSize;
                }

                /* none reserved at first */
                buf_reservedBufs = 0;

                /* just for safety's sake */
                buf_maxReservedBufs = buf_nbuffers - 3;

                /* init the buffer trace log */
                buf_logp = osi_LogCreate("buffer", 10);

                osi_EndOnce(&once);

                /* and create the incr-syncer */
                phandle = thrd_Create(0, 0,
                                      (ThreadFunc) buf_IncrSyncer, 0, 0, &pid,
                                      "buf_IncrSyncer");

                osi_assertx(phandle != NULL, "buf: can't create incremental sync proc");
#ifndef DJGPP
                CloseHandle(phandle);
#endif /* !DJGPP */
        }

        return 0;
}
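
/* Illustrative only: buf_Init is handed a cm_buf_ops_t whose callouts do the
 * real I/O.  The member names below match the uses elsewhere in this file
 * (Writep, Readp, Stabilizep, Unstabilizep); the cm_Buf* function names are
 * hypothetical.  Somewhere in the client's init path:
 *
 *      static cm_buf_ops_t cm_bufOps;
 *      cm_bufOps.Writep = cm_BufWrite;             // write back a dirty buffer
 *      cm_bufOps.Readp = cm_BufRead;               // fill a buffer with file data
 *      cm_bufOps.Stabilizep = cm_BufStabilize;     // quiesce a vnode for recycling
 *      cm_bufOps.Unstabilizep = cm_BufUnstabilize; // undo Stabilizep
 *      buf_Init(&cm_bufOps);
 */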

/* add nbuffers to the buffer pool, if possible.
 * Called with no locks held.
 */
long buf_AddBuffers(long nbuffers)
{
        cm_buf_t *bp;
        int i;
        char *data;
#ifndef DJGPP
        HANDLE hm;
        long cs;

        afsi_log("%d buffers being added to the existing cache of size %d",
                 nbuffers, buf_nbuffers);

        /*
         * Cache file mapping constrained by
         * system allocation granularity;
         * round up, assuming granularity is a power of two;
         * assume existing cache size is already rounded
         */
        cs = nbuffers * buf_bufferSize;
        cs = (cs + (sysInfo.dwAllocationGranularity - 1))
                & ~(sysInfo.dwAllocationGranularity - 1);
        if (cs != nbuffers * buf_bufferSize) {
                nbuffers = cs / buf_bufferSize;
        }

        /* Reserve additional buffer space by remapping cache file */
        hm = CreateFileMapping(CacheHandle,
                NULL,
                PAGE_READWRITE,
                0, (buf_nbuffers + nbuffers) * buf_bufferSize,
                NULL);
        if (hm == NULL) {
                if (GetLastError() == ERROR_DISK_FULL)
                        return CM_ERROR_TOOMANYBUFS;
                else
                        return CM_ERROR_INVAL;
        }
        data = MapViewOfFile(hm,
                FILE_MAP_ALL_ACCESS,
                0, buf_nbuffers * buf_bufferSize,
                nbuffers * buf_bufferSize);
        if (data == NULL) {
                CloseHandle(hm);
                return CM_ERROR_INVAL;
        }
        CloseHandle(hm);
#else
        /* allocate space for the new buffers only, not the whole cache */
        data = malloc(nbuffers * buf_bufferSize);
#endif /* !DJGPP */

        /* Create buffer headers and put in free list */
        bp = malloc(nbuffers * sizeof(*bp));

        for(i=0; i<nbuffers; i++) {
                memset(bp, 0, sizeof(*bp));

                lock_InitializeMutex(&bp->mx, "cm_buf_t");

                /* grab appropriate number of bytes from aligned zone */
                bp->datap = data;

                bp->flags |= CM_BUF_INLRU;

                lock_ObtainWrite(&buf_globalLock);
                /* note that buf_allp chain is covered by buf_globalLock now */
                bp->allp = buf_allp;
                buf_allp = bp;
                osi_QAdd((osi_queue_t **) &buf_freeListp, &bp->q);
                if (!buf_freeListEndp) buf_freeListEndp = bp;
                buf_nbuffers++;
                lock_ReleaseWrite(&buf_globalLock);

                bp++;
                data += buf_bufferSize;

        }        /* for loop over all buffers */

        return 0;
}

/* interface to set the number of buffers to an exact figure.
 * Called with no locks held.
 */
long buf_SetNBuffers(long nbuffers)
{
        if (nbuffers < 10) return CM_ERROR_INVAL;
        if (nbuffers == buf_nbuffers) return 0;
        else if (nbuffers > buf_nbuffers)
                return buf_AddBuffers(nbuffers - buf_nbuffers);
        else return CM_ERROR_INVAL;
}

/* release a buffer.  Buffer must be referenced, but unlocked. */
void buf_Release(cm_buf_t *bp)
{
        lock_ObtainWrite(&buf_globalLock);
        buf_LockedRelease(bp);
        lock_ReleaseWrite(&buf_globalLock);
}

/* wait for reading or writing to clear; called with write-locked
 * buffer, and returns with locked buffer.
 */
void buf_WaitIO(cm_buf_t *bp)
{
        while (1) {
                /* if no IO is happening, we're done */
                if (!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)))
                        break;

                /* otherwise I/O is happening, but some other thread is waiting for
                 * the I/O already.  Wait for that guy to figure out what happened,
                 * and then check again.
                 */
                if (bp->flags & CM_BUF_WAITING)
                        osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING already set for 0x%x", bp);

                bp->flags |= CM_BUF_WAITING;
                osi_SleepM((long) bp, &bp->mx);
                lock_ObtainMutex(&bp->mx);
                osi_Log1(buf_logp, "buf_WaitIO conflict wait done for 0x%x", bp);
        }

        /* if we get here, the IO is done, but we may have to wakeup people waiting for
         * the I/O to complete.  Do so.
         */
        if (bp->flags & CM_BUF_WAITING) {
                bp->flags &= ~CM_BUF_WAITING;
                osi_Wakeup((long) bp);
        }
        osi_Log1(buf_logp, "WaitIO finished wait for bp 0x%x", (long) bp);
}

/* code to drop reference count while holding buf_globalLock */
void buf_LockedRelease(cm_buf_t *bp)
{
        /* ensure that we're in the LRU queue if our ref count is 0 */
        osi_assert(bp->refCount > 0);
        if (--bp->refCount == 0) {
                if (!(bp->flags & CM_BUF_INLRU)) {
                        osi_QAdd((osi_queue_t **) &buf_freeListp, &bp->q);

                        /* watch for transition from empty to one element */
                        if (!buf_freeListEndp)
                                buf_freeListEndp = buf_freeListp;
                        bp->flags |= CM_BUF_INLRU;
                }
        }
}

/* find a buffer, if any, for a particular file ID and offset.  Assumes
 * that buf_globalLock is write locked when called.
 */
cm_buf_t *buf_LockedFind(struct cm_scache *scp, osi_hyper_t *offsetp)
{
        long i;
        cm_buf_t *bp;

        i = BUF_HASH(&scp->fid, offsetp);
        for(bp = buf_hashTablepp[i]; bp; bp=bp->hashp) {
                if (cm_FidCmp(&scp->fid, &bp->fid) == 0
                        && offsetp->LowPart == bp->offset.LowPart
                        && offsetp->HighPart == bp->offset.HighPart) {
                        bp->refCount++;
                        break;
                }
        }

        /* return whatever we found, if anything */
        return bp;
}

/* find a buffer with offset *offsetp for vnode *scp.  Called
 * with no locks held.
 */
cm_buf_t *buf_Find(struct cm_scache *scp, osi_hyper_t *offsetp)
{
        cm_buf_t *bp;

        lock_ObtainWrite(&buf_globalLock);
        bp = buf_LockedFind(scp, offsetp);
        lock_ReleaseWrite(&buf_globalLock);

        return bp;
}

/* start cleaning I/O on this buffer.  Buffer must be write locked, and is returned
 * write-locked.
 *
 * Makes sure that there's only one person writing this block
 * at any given time, and also ensures that the log is forced sufficiently far,
 * if this buffer contains logged data.
 */
void buf_LockedCleanAsync(cm_buf_t *bp, cm_req_t *reqp)
{
        long code;

        code = 0;
        while ((bp->flags & (CM_BUF_WRITING | CM_BUF_DIRTY)) == CM_BUF_DIRTY) {
                lock_ReleaseMutex(&bp->mx);

                code = (*cm_buf_opsp->Writep)(&bp->fid, &bp->offset,
                                                buf_bufferSize, 0, bp->userp,
                                                reqp);

                lock_ObtainMutex(&bp->mx);
                if (code) break;

#ifdef DISKCACHE95
                /* Disk cache support */
                /* write buffer to disk cache (synchronous for now) */
                diskcache_Update(bp->dcp, bp->datap, buf_bufferSize, bp->dataVersion);
#endif /* DISKCACHE95 */
        }

        /* do logging after any call to GetLastError, or logging may clobber it */
        osi_Log2(buf_logp, "buf_CleanAsync starts I/O on 0x%x, done=%d", bp, code);

        /* if someone was waiting for the I/O that just completed or failed,
         * wake them up.
         */
        if (bp->flags & CM_BUF_WAITING) {
                /* turn off flags and wakeup users */
                bp->flags &= ~CM_BUF_WAITING;
                osi_Wakeup((long) bp);
        }
}

/* Called with a zero-ref count buffer and with the buf_globalLock write locked.
 * recycles the buffer, and leaves it ready for reuse with a ref count of 1.
 * The buffer must already be clean, and no I/O should be happening to it.
 */
void buf_Recycle(cm_buf_t *bp)
{
        int i;
        cm_buf_t **lbpp;
        cm_buf_t *tbp;
        cm_buf_t *prevBp, *nextBp;

        /* if we get here, we know that the buffer still has a 0 ref count,
         * and that it is clean and has no currently pending I/O.  This is
         * the dude to return.
         * Remember that as long as the ref count is 0, we know that we won't
         * have any lock conflicts, so we can grab the buffer lock out of
         * order in the locking hierarchy.
         */
        osi_Log2(buf_logp,
                "buf_Recycle recycles 0x%x, off 0x%x",
                bp, bp->offset.LowPart);

        osi_assert(bp->refCount == 0);
        osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY)));
        lock_AssertWrite(&buf_globalLock);

        if (bp->flags & CM_BUF_INHASH) {
                /* Remove from hash */

                i = BUF_HASH(&bp->fid, &bp->offset);
                lbpp = &(buf_hashTablepp[i]);
                for(tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = *lbpp) {
                        if (tbp == bp) break;
                }

                /* we better find it */
                osi_assertx(tbp != NULL, "buf_Recycle: hash table screwup");

                *lbpp = bp->hashp;      /* hash out */

                /* Remove from file hash */

                i = BUF_FILEHASH(&bp->fid);
                prevBp = bp->fileHashBackp;
                nextBp = bp->fileHashp;
                if (prevBp)
                        prevBp->fileHashp = nextBp;
                else
                        buf_fileHashTablepp[i] = nextBp;
                if (nextBp)
                        nextBp->fileHashBackp = prevBp;

                bp->flags &= ~CM_BUF_INHASH;
        }

        /* bump the soft reference counter now, to invalidate softRefs; no
         * wakeup is required since people don't sleep waiting for this
         * counter to change.
         */
        bp->idCounter++;

        /* make the fid unrecognizable */
        memset(&bp->fid, 0, sizeof(bp->fid));
}

/* recycle a buffer, removing it from the free list, hashing in its new identity
 * and returning it write-locked so that no one can use it.  Called without
 * any locks held, and can return an error if it loses the race condition and
 * finds that someone else created the desired buffer.
 *
 * If success is returned, the buffer is returned write-locked.
 *
 * May be called with null scp and offsetp, if we're just trying to reclaim some
 * space from the buffer pool.  In that case, the buffer will be returned
 * without being hashed into the hash table.
 */
long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
        cm_buf_t *bp;           /* buffer we're dealing with */
        cm_buf_t *nextBp;       /* next buffer in file hash chain */
        long i;                 /* temp */
        cm_req_t req;

        cm_InitReq(&req);       /* just in case */

        while(1) {
retry:
                lock_ObtainWrite(&buf_globalLock);
                /* check to see if we lost the race */
                if (scp) {
                        if ((bp = buf_LockedFind(scp, offsetp))) {
                                bp->refCount--;
                                lock_ReleaseWrite(&buf_globalLock);
                                return CM_BUF_EXISTS;
                        }
                }

                /* for debugging, assert free list isn't empty, although we
                 * really should try waiting for a running transaction to finish
                 * instead of this; or better, we should have a transaction
                 * throttler prevent us from entering this situation.
                 */
                osi_assertx(buf_freeListEndp != NULL, "buf_GetNewLocked: no free buffers");

                /* look at all buffers in free list, some of which may temp.
                 * have high refcounts and which then should be skipped,
                 * starting cleaning I/O for those which are dirty.  If we find
                 * a clean buffer, we rehash it, lock it and return it.
                 */
                for(bp = buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
                        /* check to see if it really has zero ref count.  This
                         * code can bump refcounts, at least, so it may not be
                         * zero.
                         */
                        if (bp->refCount > 0) continue;

                        /* we don't have to lock buffer itself, since the ref
                         * count is 0 and we know it will stay zero as long as
                         * we hold the global lock.
                         */

                        /* don't recycle someone in our own chunk; note that scp
                         * may be null, in which case there is no chunk to protect.
                         */
                        if (scp && !cm_FidCmp(&bp->fid, &scp->fid)
                            && (bp->offset.LowPart & (-cm_chunkSize))
                                  == (offsetp->LowPart & (-cm_chunkSize)))
                                continue;

                        /* if this page is being filled (!) or cleaned, see if
                         * the I/O has completed.  If not, skip it, otherwise
                         * do the final processing for the I/O.
                         */
                        if (bp->flags & (CM_BUF_READING | CM_BUF_WRITING)) {
                                /* probably shouldn't do this much work while
                                 * holding the big lock?  Watch for contention
                                 * here.
                                 */
                                continue;
                        }

                        if (bp->flags & CM_BUF_DIRTY) {
                                /* if the buffer is dirty, start cleaning it and
                                 * move on to the next buffer.  We do this with
                                 * just the lock required to minimize contention
                                 * on the big lock.
                                 */
                                bp->refCount++;
                                lock_ReleaseWrite(&buf_globalLock);

                                /* grab required lock and clean; this only
                                 * starts the I/O.  By the time we're back,
                                 * it'll still be marked dirty, but it will also
                                 * have the WRITING flag set, so we won't get
                                 * back here.
                                 */
                                buf_CleanAsync(bp, &req);

                                /* now put it back and go around again */
                                buf_Release(bp);
                                goto retry;
                        }

                        /* if we get here, we know that the buffer still has a 0
                         * ref count, and that it is clean and has no currently
                         * pending I/O.  This is the dude to return.
                         * Remember that as long as the ref count is 0, we know
                         * that we won't have any lock conflicts, so we can grab
                         * the buffer lock out of order in the locking hierarchy.
                         */
                        buf_Recycle(bp);

                        /* clean up junk flags */
                        bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR);
                        bp->dataVersion = -1;   /* unknown so far */

                        /* now hash in as our new buffer, and give it the
                         * appropriate label, if requested.
                         */
                        if (scp) {
                                bp->flags |= CM_BUF_INHASH;
                                bp->fid = scp->fid;
                                bp->offset = *offsetp;
                                i = BUF_HASH(&scp->fid, offsetp);
                                bp->hashp = buf_hashTablepp[i];
                                buf_hashTablepp[i] = bp;
                                i = BUF_FILEHASH(&scp->fid);
                                nextBp = buf_fileHashTablepp[i];
                                bp->fileHashp = nextBp;
                                bp->fileHashBackp = NULL;
                                if (nextBp)
                                        nextBp->fileHashBackp = bp;
                                buf_fileHashTablepp[i] = bp;
                        }

                        /* prepare to return it.  Start by giving it a good
                         * refcount */
                        bp->refCount = 1;

                        /* and since it has a non-zero ref count, we should move
                         * it from the lru queue.  It better be still there,
                         * since we've held the global (big) lock since we found
                         * it there.
                         */
                        osi_assertx(bp->flags & CM_BUF_INLRU,
                                    "buf_GetNewLocked: LRU screwup");
                        if (buf_freeListEndp == bp) {
                                /* we're the last guy in this queue, so maintain it */
                                buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
                        }
                        osi_QRemove((osi_queue_t **) &buf_freeListp, &bp->q);
                        bp->flags &= ~CM_BUF_INLRU;

                        /* finally, grab the mutex so that people don't use it
                         * before the caller fills it with data.  Again, no one
                         * should have been able to get to this dude to lock it.
                         */
                        osi_assertx(lock_TryMutex(&bp->mx),
                                    "buf_GetNewLocked: TryMutex failed");

                        lock_ReleaseWrite(&buf_globalLock);
                        *bufpp = bp;
                        return 0;
                } /* for all buffers in lru queue */
                lock_ReleaseWrite(&buf_globalLock);
        }       /* while loop over everything */
        /* not reached */
} /* the proc */

/* get a page, returning it held but unlocked.  Doesn't fill in the page
 * with I/O, since we're going to write the whole thing new.
 */
long buf_GetNew(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
        cm_buf_t *bp;
        long code;
        osi_hyper_t pageOffset;
        int created;

        created = 0;
        pageOffset.HighPart = offsetp->HighPart;
        pageOffset.LowPart = offsetp->LowPart & ~(buf_bufferSize-1);
        while (1) {
                lock_ObtainWrite(&buf_globalLock);
                bp = buf_LockedFind(scp, &pageOffset);
                lock_ReleaseWrite(&buf_globalLock);
                if (bp) {
                        /* lock it and break out */
                        lock_ObtainMutex(&bp->mx);
                        break;
                }

                /* otherwise, we have to create a page */
                code = buf_GetNewLocked(scp, &pageOffset, &bp);

                /* check if the buffer was created in a race condition branch.
                 * If so, go around so we can hold a reference to it.
                 */
                if (code == CM_BUF_EXISTS) continue;

                /* something else went wrong */
                if (code != 0) return code;

                /* otherwise, we have a locked buffer that we just created */
                created = 1;
                break;
        } /* big while loop */

        /* wait for reads */
        if (bp->flags & CM_BUF_READING)
                buf_WaitIO(bp);

        /* once it has been read once, we can unlock it and return it, still
         * with its refcount held.
         */
        lock_ReleaseMutex(&bp->mx);
        *bufpp = bp;
        osi_Log3(buf_logp, "buf_GetNew returning bp 0x%x for file 0x%x, offset 0x%x",
                bp, (long) scp, offsetp->LowPart);
        return 0;
}

/* get a page, returning it held but unlocked.  Make sure it is complete */
long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
        cm_buf_t *bp;
        long code;
        osi_hyper_t pageOffset;
        unsigned long tcount;
        int created;
#ifdef DISKCACHE95
        cm_diskcache_t *dcp;
#endif /* DISKCACHE95 */

        created = 0;
        pageOffset.HighPart = offsetp->HighPart;
        pageOffset.LowPart = offsetp->LowPart & ~(buf_bufferSize-1);
        while (1) {
                lock_ObtainWrite(&buf_globalLock);
                bp = buf_LockedFind(scp, &pageOffset);
                lock_ReleaseWrite(&buf_globalLock);
                if (bp) {
#ifdef DISKCACHE95
                        /* touch disk chunk to update LRU info; must happen
                         * before the break below, or it never runs.
                         */
                        diskcache_Touch(bp->dcp);
#endif /* DISKCACHE95 */
                        /* lock it and break out */
                        lock_ObtainMutex(&bp->mx);
                        break;
                }

                /* otherwise, we have to create a page */
                code = buf_GetNewLocked(scp, &pageOffset, &bp);

                /* check if the buffer was created in a race condition branch.
                 * If so, go around so we can hold a reference to it.
                 */
                if (code == CM_BUF_EXISTS) continue;

                /* something else went wrong */
                if (code != 0) return code;

                /* otherwise, we have a locked buffer that we just created */
                created = 1;
                break;
        } /* big while loop */

        /* if we get here, we have a locked buffer that may have just been
         * created, in which case it needs to be filled with data.
         */
        if (created) {
                /* load the page; freshly created pages should be idle */
                osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)));

                /* setup offset, event */
#ifndef DJGPP  /* doesn't seem to be used */
                bp->over.Offset = bp->offset.LowPart;
                bp->over.OffsetHigh = bp->offset.HighPart;
#endif /* !DJGPP */

                /* start the I/O; may drop lock */
                bp->flags |= CM_BUF_READING;
                code = (*cm_buf_opsp->Readp)(bp, buf_bufferSize, &tcount, NULL);

#ifdef DISKCACHE95
                code = diskcache_Get(&bp->fid, &bp->offset, bp->datap, buf_bufferSize, &bp->dataVersion, &tcount, &dcp);
                bp->dcp = dcp;    /* pointer to disk cache struct. */
#endif /* DISKCACHE95 */

                if (code != 0) {
                        /* failure or queued */
#ifndef DJGPP   /* cm_bufRead always returns 0 */
                        if (code != ERROR_IO_PENDING) {
#endif
                                bp->error = code;
                                bp->flags |= CM_BUF_ERROR;
                                bp->flags &= ~CM_BUF_READING;
                                if (bp->flags & CM_BUF_WAITING) {
                                        bp->flags &= ~CM_BUF_WAITING;
                                        osi_Wakeup((long) bp);
                                }
                                lock_ReleaseMutex(&bp->mx);
                                buf_Release(bp);
                                return code;
#ifndef DJGPP
                        }
#endif
                } else {
                        /* otherwise, I/O completed instantly and we're done, except
                         * for padding the xfr out with 0s and checking for EOF
                         */
                        if (tcount < (unsigned long) buf_bufferSize) {
                                memset(bp->datap+tcount, 0, buf_bufferSize - tcount);
                                if (tcount == 0)
                                        bp->flags |= CM_BUF_EOF;
                        }
                        bp->flags &= ~CM_BUF_READING;
                        if (bp->flags & CM_BUF_WAITING) {
                                bp->flags &= ~CM_BUF_WAITING;
                                osi_Wakeup((long) bp);
                        }
                }

        } /* if created */

        /* wait for reads, either that which we started above, or that someone
         * else started.  We don't care if we return a buffer being cleaned.
         */
        if (bp->flags & CM_BUF_READING)
                buf_WaitIO(bp);

        /* once it has been read once, we can unlock it and return it, still
         * with its refcount held.
         */
        lock_ReleaseMutex(&bp->mx);
        *bufpp = bp;

        /* now remove from queue; will be put in at the head (farthest from
         * being recycled) when we're done in buf_Release.
         */
        lock_ObtainWrite(&buf_globalLock);
        if (bp->flags & CM_BUF_INLRU) {
                if (buf_freeListEndp == bp)
                        buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
                osi_QRemove((osi_queue_t **) &buf_freeListp, &bp->q);
                bp->flags &= ~CM_BUF_INLRU;
        }
        lock_ReleaseWrite(&buf_globalLock);

        osi_Log3(buf_logp, "buf_Get returning bp 0x%x for file 0x%x, offset 0x%x",
                bp, (long) scp, offsetp->LowPart);
        return 0;
}
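
/* Illustrative only: a typical read path holds a page from buf_Get, copies
 * out of it, and releases it (error handling elided; out, nbytes, and
 * fileOffset are hypothetical):
 *
 *      cm_buf_t *bufp;
 *      osi_hyper_t offset;
 *      offset.HighPart = 0;
 *      offset.LowPart = fileOffset;    // buf_Get rounds to a page boundary
 *      if (buf_Get(scp, &offset, &bufp) == 0) {
 *              memcpy(out, bufp->datap + (fileOffset & (buf_bufferSize - 1)),
 *                     nbytes);         // caller keeps nbytes within the page
 *              buf_Release(bufp);      // drop the reference buf_Get returned
 *      }
 */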

/* count # of elements in the free list;
 * we don't bother doing the proper locking for accessing dataVersion or flags
 * since it is a pain, and this is really just an advisory call.  If you need
 * to do better at some point, rewrite this function.
 */
long buf_CountFreeList(void)
{
        long count;
        cm_buf_t *bufp;

        count = 0;
        lock_ObtainRead(&buf_globalLock);
        for(bufp = buf_freeListp; bufp; bufp = (cm_buf_t *) osi_QNext(&bufp->q)) {
                /* if the buffer doesn't have an identity, or if the buffer
                 * has been invalidated (by having its DV stomped upon), then
                 * count it as free, since it isn't really being utilized.
                 */
                if (!(bufp->flags & CM_BUF_INHASH) || bufp->dataVersion <= 0)
                        count++;
        }
        lock_ReleaseRead(&buf_globalLock);
        return count;
}

/* start cleaning a buffer; the write is begun here but not waited for
 * (pair with buf_CleanWait to wait for it to finish).
 */
void buf_CleanAsync(cm_buf_t *bp, cm_req_t *reqp)
{
        lock_ObtainMutex(&bp->mx);
        buf_LockedCleanAsync(bp, reqp);
        lock_ReleaseMutex(&bp->mx);
}

/* wait for a buffer's cleaning to finish */
void buf_CleanWait(cm_buf_t *bp)
{
        lock_ObtainMutex(&bp->mx);
        if (bp->flags & CM_BUF_WRITING) {
                buf_WaitIO(bp);
        }
        lock_ReleaseMutex(&bp->mx);
}

/* set the dirty flag on a buffer, and set associated write-ahead log,
 * if there is one.  Allow one to be added to a buffer, but not changed.
 *
 * The buffer must be locked before calling this routine.
 */
void buf_SetDirty(cm_buf_t *bp)
{
        osi_assert(bp->refCount > 0);

        osi_Log1(buf_logp, "buf_SetDirty 0x%x", bp);

        /* set dirty bit */
        bp->flags |= CM_BUF_DIRTY;

        /* and turn off EOF flag, since it has associated data now */
        bp->flags &= ~CM_BUF_EOF;
}
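
/* Illustrative only: the write path mirrors the read path, marking the page
 * dirty while the buffer mutex is held (pagePos, in, nbytes hypothetical):
 *
 *      cm_buf_t *bufp;
 *      if (buf_Get(scp, &offset, &bufp) == 0) {
 *              lock_ObtainMutex(&bufp->mx);    // buf_SetDirty requires the lock
 *              memcpy(bufp->datap + pagePos, in, nbytes);
 *              buf_SetDirty(bufp);             // incr syncer will write it back
 *              lock_ReleaseMutex(&bufp->mx);
 *              buf_Release(bufp);
 *      }
 */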

/* clean all buffers, reset log pointers and invalidate all buffers.
 * Called with no locks held, and returns with same.
 *
 * This function is guaranteed to clean and remove the log ptr of all the
 * buffers that were dirty or had non-zero log ptrs before the call was
 * made.  That's sufficient to clean up any garbage left around by recovery,
 * which is all we're counting on this for; there may be newly created buffers
 * added while we're running, but that should be OK.
 *
 * In an environment where there are no transactions (artificially imposed, for
 * example, when switching the database to raw mode), this function is used to
 * make sure that all updates have been written to the disk.  In that case, we don't
 * really require that we forget the log association between pages and logs, but
 * it also doesn't hurt.  Since raw mode I/O goes through this buffer package, we don't
 * have to worry about invalidating data in the buffers.
 *
 * This function is used at the end of recovery as paranoia to get the recovered
 * database out to disk.  It removes all references to the recovery log and cleans
 * all buffers.
 */
long buf_CleanAndReset(void)
{
        long i;
        cm_buf_t *bp;
        cm_req_t req;

        lock_ObtainWrite(&buf_globalLock);
        for(i=0; i<buf_hashSize; i++) {
                for(bp = buf_hashTablepp[i]; bp; bp = bp->hashp) {
                        bp->refCount++;
                        lock_ReleaseWrite(&buf_globalLock);

                        /* now no locks are held; clean buffer and go on */
                        cm_InitReq(&req);
                        buf_CleanAsync(bp, &req);
                        buf_CleanWait(bp);

                        /* relock and release buffer */
                        lock_ObtainWrite(&buf_globalLock);
                        buf_LockedRelease(bp);
                } /* over one bucket */
        }       /* for loop over all hash buckets */

        /* release locks */
        lock_ReleaseWrite(&buf_globalLock);

        /* and we're done */
        return 0;
}

/* called without global lock being held, reserves buffers for callers
 * that need more than one held (not locked) at once.
 */
void buf_ReserveBuffers(long nbuffers)
{
        lock_ObtainWrite(&buf_globalLock);
        while (1) {
                if (buf_reservedBufs + nbuffers > buf_maxReservedBufs) {
                        buf_reserveWaiting = 1;
                        osi_Log1(buf_logp, "buf_ReserveBuffers waiting for %d bufs", nbuffers);
                        osi_SleepW((long) &buf_reservedBufs, &buf_globalLock);
                        lock_ObtainWrite(&buf_globalLock);
                }
                else {
                        buf_reservedBufs += nbuffers;
                        break;
                }
        }
        lock_ReleaseWrite(&buf_globalLock);
}

int buf_TryReserveBuffers(long nbuffers)
{
        int code;

        lock_ObtainWrite(&buf_globalLock);
        if (buf_reservedBufs + nbuffers > buf_maxReservedBufs) {
                code = 0;
        }
        else {
                buf_reservedBufs += nbuffers;
                code = 1;
        }
        lock_ReleaseWrite(&buf_globalLock);
        return code;
}

/* called without global lock held, releases reservation held by
 * buf_ReserveBuffers.
 */
void buf_UnreserveBuffers(long nbuffers)
{
        lock_ObtainWrite(&buf_globalLock);
        buf_reservedBufs -= nbuffers;
        if (buf_reserveWaiting) {
                buf_reserveWaiting = 0;
                osi_Wakeup((long) &buf_reservedBufs);
        }
        lock_ReleaseWrite(&buf_globalLock);
}
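
/* Illustrative only: a caller that must hold several buffers at once (for
 * example, one whole chunk's worth) brackets the work with a reservation so
 * the pool can't be exhausted:
 *
 *      long needed = cm_chunkSize / buf_bufferSize;
 *      buf_ReserveBuffers(needed);     // may sleep until enough are free
 *      ...buf_Get each page in the chunk, use them, buf_Release each...
 *      buf_UnreserveBuffers(needed);   // wakes any waiting reservers
 */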

/* truncate the buffers past sizep, zeroing out the page, if we don't
 * end on a page boundary.
 *
 * Requires cm_bufCreateLock to be write locked.
 */
long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
        osi_hyper_t *sizep)
{
        cm_buf_t *bufp;
        cm_buf_t *nbufp;                        /* next buffer, if didRelease */
        osi_hyper_t bufEnd;
        long code;
        long bufferPos;
        int didRelease;
        long i;

        /* assert that cm_bufCreateLock is held in write mode */
        lock_AssertWrite(&scp->bufCreateLock);

        i = BUF_FILEHASH(&scp->fid);

        lock_ObtainWrite(&buf_globalLock);
        bufp = buf_fileHashTablepp[i];
        if (bufp == NULL) {
                lock_ReleaseWrite(&buf_globalLock);
                return 0;
        }

        bufp->refCount++;
        lock_ReleaseWrite(&buf_globalLock);
        for(; bufp; bufp = nbufp) {
                didRelease = 0;
                lock_ObtainMutex(&bufp->mx);

                bufEnd.HighPart = 0;
                bufEnd.LowPart = buf_bufferSize;
                bufEnd = LargeIntegerAdd(bufEnd, bufp->offset);

                if (cm_FidCmp(&bufp->fid, &scp->fid) == 0 &&
                        LargeIntegerLessThan(*sizep, bufEnd)) {
                        buf_WaitIO(bufp);
                }
                lock_ObtainMutex(&scp->mx);

                /* make sure we have a callback (so we have the right value for
                 * the length), and wait for it to be safe to do a truncate.
                 */
                code = cm_SyncOp(scp, bufp, userp, reqp, 0,
                                 CM_SCACHESYNC_NEEDCALLBACK
                                 | CM_SCACHESYNC_GETSTATUS
                                 | CM_SCACHESYNC_SETSIZE
                                 | CM_SCACHESYNC_BUFLOCKED);
                /* if we succeeded in our locking, and this applies to the right
                 * file, and the truncate request overlaps the buffer either
                 * totally or partially, then do something.
                 */
                if (code == 0 && cm_FidCmp(&bufp->fid, &scp->fid) == 0
                        && LargeIntegerLessThan(*sizep, bufEnd)) {

                        lock_ObtainWrite(&buf_globalLock);

                        /* destroy the buffer, turning off its dirty bit, if
                         * we're truncating the whole buffer.  Otherwise, set
                         * the dirty bit, and clear out the tail of the buffer
                         * if we just overlap some.
                         */
                        if (LargeIntegerLessThanOrEqualTo(*sizep, bufp->offset)) {
                                /* truncating the entire page */
                                bufp->flags &= ~CM_BUF_DIRTY;
                                bufp->dataVersion = -1; /* known bad */
                                bufp->dirtyCounter++;
                        }
                        else {
                                /* don't set dirty, since dirty implies
                                 * currently up-to-date.  Don't need to do this,
                                 * since we'll update the length anyway.
                                 *
                                 * Zero out remainder of the page, in case we
                                 * seek and write past EOF, and make this data
                                 * visible again.
                                 */
                                bufferPos = sizep->LowPart & (buf_bufferSize - 1);
                                osi_assert(bufferPos != 0);
                                memset(bufp->datap + bufferPos, 0,
                                        buf_bufferSize - bufferPos);
                        }

                        lock_ReleaseWrite(&buf_globalLock);

                }

                lock_ReleaseMutex(&scp->mx);
                lock_ReleaseMutex(&bufp->mx);
                if (!didRelease) {
                        lock_ObtainWrite(&buf_globalLock);
                        nbufp = bufp->fileHashp;
                        if (nbufp) nbufp->refCount++;
                        buf_LockedRelease(bufp);
                        lock_ReleaseWrite(&buf_globalLock);
                }

                /* bail out early if we fail */
                if (code) {
                        /* at this point, nbufp is held; bufp has already been
                         * released.
                         */
                        if (nbufp) buf_Release(nbufp);
                        return code;
                }
        }

        /* success */
        return 0;
}
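
/* Illustrative only: truncate callers serialize against buffer creation by
 * holding the vnode's bufCreateLock in write mode across the call:
 *
 *      lock_ObtainWrite(&scp->bufCreateLock);
 *      code = buf_Truncate(scp, userp, &req, &newLength);
 *      lock_ReleaseWrite(&scp->bufCreateLock);
 */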

long buf_FlushCleanPages(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
{
        long code;
        cm_buf_t *bp;           /* buffer we're hacking on */
        cm_buf_t *nbp;
        int didRelease;
        long i;

        i = BUF_FILEHASH(&scp->fid);

        code = 0;
        lock_ObtainWrite(&buf_globalLock);
        bp = buf_fileHashTablepp[i];
        if (bp) bp->refCount++;
        lock_ReleaseWrite(&buf_globalLock);
        for(; bp; bp = nbp) {
                didRelease = 0; /* haven't released this buffer yet */

                /* clean buffer synchronously */
                if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
                        lock_ObtainMutex(&bp->mx);

                        /* start cleaning the buffer, and wait for it to finish */
                        buf_LockedCleanAsync(bp, reqp);
                        buf_WaitIO(bp);
                        lock_ReleaseMutex(&bp->mx);

                        code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
                        if (code) goto skip;

                        lock_ObtainWrite(&buf_globalLock);
                        /* actually, we only know that buffer is clean if ref
                         * count is 1, since we don't have buffer itself locked.
                         */
                        if (!(bp->flags & CM_BUF_DIRTY)) {
                                if (bp->refCount == 1) {        /* bp is held above */
                                        buf_LockedRelease(bp);
                                        nbp = bp->fileHashp;
                                        if (nbp) nbp->refCount++;
                                        didRelease = 1;
                                        buf_Recycle(bp);
                                }
                        }
                        lock_ReleaseWrite(&buf_globalLock);

                        (*cm_buf_opsp->Unstabilizep)(scp, userp);
                }

skip:
                if (!didRelease) {
                        lock_ObtainWrite(&buf_globalLock);
                        if ((nbp = bp->fileHashp)) nbp->refCount++;
                        buf_LockedRelease(bp);
                        lock_ReleaseWrite(&buf_globalLock);
                }
        }       /* for loop over a bunch of buffers */

        /* done */
        return code;
}

long buf_CleanVnode(struct cm_scache *scp, cm_user_t *userp, cm_req_t *reqp)
{
        long code;
        cm_buf_t *bp;           /* buffer we're hacking on */
        cm_buf_t *nbp;          /* next one */
        long i;

        i = BUF_FILEHASH(&scp->fid);

        code = 0;
        lock_ObtainWrite(&buf_globalLock);
        bp = buf_fileHashTablepp[i];
        if (bp) bp->refCount++;
        lock_ReleaseWrite(&buf_globalLock);
        for(; bp; bp = nbp) {
                /* clean buffer synchronously */
                if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
                        if (userp) {
                                cm_HoldUser(userp);
                                lock_ObtainMutex(&bp->mx);
                                if (bp->userp)
                                        cm_ReleaseUser(bp->userp);
                                bp->userp = userp;
                                lock_ReleaseMutex(&bp->mx);
                        }
                        buf_CleanAsync(bp, reqp);
                        buf_CleanWait(bp);
                        lock_ObtainMutex(&bp->mx);
                        if (bp->flags & CM_BUF_ERROR) {
                                if (code == 0 || code == -1) code = bp->error;
                                if (code == 0) code = -1;
                        }
                        lock_ReleaseMutex(&bp->mx);
                }

                lock_ObtainWrite(&buf_globalLock);
                buf_LockedRelease(bp);
                nbp = bp->fileHashp;
                if (nbp) nbp->refCount++;
                lock_ReleaseWrite(&buf_globalLock);
        }       /* for loop over a bunch of buffers */

        /* done */
        return code;
}

/* dump the contents of the buf_hashTablepp. */
int cm_DumpBufHashTable(FILE *outputFile, char *cookie)
{
        int zilch;
        cm_buf_t *bp;
        char output[1024];
        int i;

        if (buf_hashTablepp == NULL)
                return -1;

        lock_ObtainRead(&buf_globalLock);

        sprintf(output, "%s - dumping buf_HashTable - buf_hashSize=%d\n", cookie, buf_hashSize);
        WriteFile(outputFile, output, strlen(output), &zilch, NULL);

        for (i = 0; i < buf_hashSize; i++)
        {
                for(bp = buf_hashTablepp[i]; bp; bp=bp->hashp)
                {
                        if (bp->refCount)
                        {
                                sprintf(output, "%s bp=0x%08X, hash=%d, fid (cell=%d, volume=%d,"
                                        " vnode=%d, unique=%d), size=%d refCount=%d\n",
                                        cookie, (void *)bp, i, bp->fid.cell, bp->fid.volume,
                                        bp->fid.vnode, bp->fid.unique, bp->size, bp->refCount);
                                WriteFile(outputFile, output, strlen(output), &zilch, NULL);
                        }
                }
        }

        sprintf(output, "%s - Done dumping buf_HashTable.\n", cookie);
        WriteFile(outputFile, output, strlen(output), &zilch, NULL);

        lock_ReleaseRead(&buf_globalLock);
        return 0;
}