/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/* Copyright (C) 1994 Cazamar Systems, Inc. */

#include <afs/param.h>
#include <afs/stds.h>

#ifndef DJGPP
#include <windows.h>
#endif
#include <osi.h>
#include <malloc.h>
#include <stdio.h>
#include <assert.h>

#include "afsd.h"

extern void afsi_log(char *pattern, ...);

/* This module implements the buffer package used by the local transaction
 * system (cm).  It is initialized by calling cm_Init, which calls buf_Init;
 * it must be initialized before any of its main routines are called.
 *
 * Each buffer is hashed into a hash table by file ID and offset, and if its
 * reference count is zero, it is also in a free list.
 *
 * There are two locks involved in buffer processing.  The global lock
 * buf_globalLock protects all of the global variables defined in this module,
 * the reference counts and hash pointers in the actual cm_buf_t structures,
 * and the LRU queue pointers in the buffer structures.
 *
 * The mutexes in the buffer structures protect the remaining fields in the
 * buffers, as well as the data itself.
 *
 * The locking hierarchy here is this:
 *
 * - resv (reservation of multiple simultaneous buffers)
 * - lock buffer I/O flags
 * - lock buffer's mutex
 * - lock buf_globalLock
 */

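/* A minimal sketch (disabled) of the intended calling pattern, for
 * illustration only: look up a page with buf_Get, take the buffer's mutex
 * before touching its data, and drop the reference when done.  The scp,
 * offsetp and consume() names are hypothetical stand-ins for caller state;
 * only the buf_* and lock_* calls are part of this package.
 */
#if 0
static long buf_ExampleRead(cm_scache_t *scp, osi_hyper_t *offsetp)
{
        cm_buf_t *bufp;
        long code;

        code = buf_Get(scp, offsetp, &bufp);    /* returns bufp held, unlocked */
        if (code)
                return code;

        lock_ObtainMutex(&bufp->mx);            /* buffer mutex guards datap */
        consume(bufp->datap, buf_bufferSize);   /* hypothetical consumer */
        lock_ReleaseMutex(&bufp->mx);

        buf_Release(bufp);                      /* drop our reference */
        return 0;
}
#endif
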
/* global debugging log */
osi_log_t *buf_logp = NULL;

/* Global lock protecting hash tables and free lists */
osi_rwlock_t buf_globalLock;

/* ptr to head of the free list (most recently used) and the
 * tail (the guy to remove first).  We use osi_Q* functions
 * to put stuff in buf_freeListp, and maintain the end
 * pointer manually
 */
cm_buf_t *buf_freeListp;
cm_buf_t *buf_freeListEndp;

/* a pointer to a list of all buffers, just so that we can find them
 * easily for debugging, and for the incr syncer.  Locked under
 * the global lock.
 */
cm_buf_t *buf_allp;

/* default parameters; these variables may be assigned manually before
 * calling cm_Init, as a way of overriding the defaults.
 */
long buf_nbuffers = CM_BUF_BUFFERS;
long buf_nOrigBuffers;
long buf_bufferSize = CM_BUF_SIZE;
long buf_hashSize = CM_BUF_HASHSIZE;
int buf_cacheType = CM_BUF_CACHETYPE_FILE;

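/* For illustration (disabled): a hedged sketch of overriding these defaults
 * before cm_Init runs.  The specific values are arbitrary examples, not
 * recommendations.
 */
#if 0
static void buf_ExampleConfigure(void)
{
        buf_nbuffers = 2048;                      /* more cache buffers */
        buf_cacheType = CM_BUF_CACHETYPE_VIRTUAL; /* page-file backed cache */
        /* ... then proceed with normal startup, which runs cm_Init ... */
}
#endif
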
#ifndef DJGPP
static HANDLE CacheHandle;

static SYSTEM_INFO sysInfo;
#endif /* !DJGPP */

/* buffer reservation variables */
long buf_reservedBufs;
long buf_maxReservedBufs;
int buf_reserveWaiting;

/* callouts for reading and writing data, etc */
cm_buf_ops_t *cm_buf_opsp;

/* pointer to hash table; size computed dynamically */
cm_buf_t **buf_hashTablepp;

/* another hash table */
cm_buf_t **buf_fileHashTablepp;

#ifdef DISKCACHE95
/* for experimental disk caching support in Win95 client */
cm_buf_t *buf_diskFreeListp;
cm_buf_t *buf_diskFreeListEndp;
cm_buf_t *buf_diskAllp;
extern int cm_diskCacheEnabled;
#endif /* DISKCACHE95 */

/* hold a reference to an already held buffer */
void buf_Hold(cm_buf_t *bp)
{
        lock_ObtainWrite(&buf_globalLock);
        bp->refCount++;
        lock_ReleaseWrite(&buf_globalLock);
}

/* incremental sync daemon.  Cleans (writes out) 1/10th of all the buffers
 * every 5000 ms.
 */
void buf_IncrSyncer(long parm)
{
        cm_buf_t *bp;                   /* buffer we're hacking on; held */
        long i;                         /* counter */
        long nAtOnce;                   /* how many to do at once */
        cm_req_t req;

        lock_ObtainWrite(&buf_globalLock);
        bp = buf_allp;
        bp->refCount++;
        lock_ReleaseWrite(&buf_globalLock);
        nAtOnce = buf_nbuffers / 10;
        while (1) {
#ifndef DJGPP
                i = SleepEx(5000, 1);
                if (i != 0) continue;
#else
                thrd_Sleep(5000);
#endif /* DJGPP */

                /* now go through our percentage of the buffers */
                for(i=0; i<nAtOnce; i++) {
                        /* don't want its identity changing while we're
                         * messing with it, so must do all of this with
                         * bp held.
                         */

                        /* start cleaning the buffer; don't touch log pages since
                         * the log code counts on knowing exactly who is writing
                         * a log page at any given instant.
                         */
                        cm_InitReq(&req);
                        req.flags |= CM_REQ_NORETRY;
                        buf_CleanAsync(bp, &req);

                        /* now advance to the next buffer; the allp chain never changes,
                         * and so can be followed even when holding no locks.
                         */
                        lock_ObtainWrite(&buf_globalLock);
                        buf_LockedRelease(bp);
                        bp = bp->allp;
                        if (!bp) bp = buf_allp;
                        bp->refCount++;
                        lock_ReleaseWrite(&buf_globalLock);
                }       /* for loop over a bunch of buffers */
        }               /* whole daemon's while loop */
}

#ifndef DJGPP
/* Create a security attribute structure suitable for use when the cache file
 * is created.  What we mainly want is that only the administrator should be
 * able to do anything with the file.  We create an ACL with only one entry,
 * an entry that grants all rights to the administrator.
 */
PSECURITY_ATTRIBUTES CreateCacheFileSA()
{
        PSECURITY_ATTRIBUTES psa;
        PSECURITY_DESCRIPTOR psd;
        SID_IDENTIFIER_AUTHORITY authority = SECURITY_NT_AUTHORITY;
        PSID AdminSID;
        DWORD AdminSIDlength;
        PACL AdminOnlyACL;
        DWORD ACLlength;

        /* Get Administrator SID */
        AllocateAndInitializeSid(&authority, 2,
                                 SECURITY_BUILTIN_DOMAIN_RID,
                                 DOMAIN_ALIAS_RID_ADMINS,
                                 0, 0, 0, 0, 0, 0,
                                 &AdminSID);

        /* Create Administrator-only ACL */
        AdminSIDlength = GetLengthSid(AdminSID);
        ACLlength = sizeof(ACL) + sizeof(ACCESS_ALLOWED_ACE)
                        + AdminSIDlength - sizeof(DWORD);
        AdminOnlyACL = GlobalAlloc(GMEM_FIXED, ACLlength);
        InitializeAcl(AdminOnlyACL, ACLlength, ACL_REVISION);
        AddAccessAllowedAce(AdminOnlyACL, ACL_REVISION,
                            STANDARD_RIGHTS_ALL | SPECIFIC_RIGHTS_ALL,
                            AdminSID);

        /* Create security descriptor */
        psd = GlobalAlloc(GMEM_FIXED, sizeof(SECURITY_DESCRIPTOR));
        InitializeSecurityDescriptor(psd, SECURITY_DESCRIPTOR_REVISION);
        SetSecurityDescriptorDacl(psd, TRUE, AdminOnlyACL, FALSE);

        /* Create security attributes structure */
        psa = GlobalAlloc(GMEM_FIXED, sizeof(SECURITY_ATTRIBUTES));
        psa->nLength = sizeof(SECURITY_ATTRIBUTES);
        psa->lpSecurityDescriptor = psd;
        psa->bInheritHandle = TRUE;

        return psa;
}
#endif /* !DJGPP */

#ifndef DJGPP
/* Free a security attribute structure created by CreateCacheFileSA() */
VOID FreeCacheFileSA(PSECURITY_ATTRIBUTES psa)
{
        BOOL b1, b2;
        PACL pAcl;

        GetSecurityDescriptorDacl(psa->lpSecurityDescriptor, &b1, &pAcl, &b2);
        GlobalFree(pAcl);
        GlobalFree(psa->lpSecurityDescriptor);
        GlobalFree(psa);
}
#endif /* !DJGPP */

/* initialize the buffer package; called with no locks
 * held during the initialization phase.
 */
long buf_Init(cm_buf_ops_t *opsp)
{
        static osi_once_t once;
        cm_buf_t *bp;
        long sectorSize;
        thread_t phandle;
#ifndef DJGPP
        HANDLE hf, hm;
        PSECURITY_ATTRIBUTES psa;
#endif /* !DJGPP */
        long i;
        unsigned long pid;
        char *data;
        long cs;

#ifndef DJGPP
        /* Get system info; all we really want is the allocation granularity */
        GetSystemInfo(&sysInfo);
#endif /* !DJGPP */

        /* Have to be able to reserve a whole chunk */
        if (((buf_nbuffers - 3) * buf_bufferSize) < cm_chunkSize)
                return CM_ERROR_TOOFEWBUFS;

        /* recall for callouts */
        cm_buf_opsp = opsp;

        if (osi_Once(&once)) {
                /* initialize global locks */
                lock_InitializeRWLock(&buf_globalLock, "Global buffer lock");

#ifndef DJGPP
                /*
                 * Cache file mapping constrained by
                 * system allocation granularity;
                 * round up, assuming granularity is a power of two
                 */
                cs = buf_nbuffers * buf_bufferSize;
                cs = (cs + (sysInfo.dwAllocationGranularity - 1))
                        & ~(sysInfo.dwAllocationGranularity - 1);
                if (cs != buf_nbuffers * buf_bufferSize) {
                        buf_nbuffers = cs / buf_bufferSize;
                        afsi_log("Cache size rounded up to %d buffers",
                                 buf_nbuffers);
                }
#endif /* !DJGPP */

                /* remember this for those who want to reset it */
                buf_nOrigBuffers = buf_nbuffers;

                /* lower hash size to a prime number */
                buf_hashSize = osi_PrimeLessThan(buf_hashSize);

                /* create hash table */
                buf_hashTablepp = malloc(buf_hashSize * sizeof(cm_buf_t *));
                memset((void *)buf_hashTablepp, 0,
                        buf_hashSize * sizeof(cm_buf_t *));

                /* another hash table */
                buf_fileHashTablepp = malloc(buf_hashSize * sizeof(cm_buf_t *));
                memset((void *)buf_fileHashTablepp, 0,
                        buf_hashSize * sizeof(cm_buf_t *));

                /* min value for which this works */
                sectorSize = 1;

#ifndef DJGPP
                if (buf_cacheType == CM_BUF_CACHETYPE_FILE) {
                        /* Reserve buffer space by mapping cache file */
                        psa = CreateCacheFileSA();
                        hf = CreateFile(cm_CachePath,
                                GENERIC_READ | GENERIC_WRITE,
                                FILE_SHARE_READ | FILE_SHARE_WRITE,
                                psa,
                                OPEN_ALWAYS,
                                FILE_ATTRIBUTE_NORMAL,
                                NULL);
                        if (hf == INVALID_HANDLE_VALUE) {
                                afsi_log("create file error %d", GetLastError());
                                FreeCacheFileSA(psa);   /* don't leak the SA on failure */
                                return CM_ERROR_INVAL;
                        }
                        FreeCacheFileSA(psa);
                } else { /* buf_cacheType == CM_BUF_CACHETYPE_VIRTUAL */
                        hf = INVALID_HANDLE_VALUE;
                }
                CacheHandle = hf;
                hm = CreateFileMapping(hf,
                        NULL,
                        PAGE_READWRITE,
                        0, buf_nbuffers * buf_bufferSize,
                        NULL);
                if (hm == NULL) {
                        if (GetLastError() == ERROR_DISK_FULL) {
                                afsi_log("Error creating cache file mapping: disk full");
                                return CM_ERROR_TOOMANYBUFS;
                        }
                        return CM_ERROR_INVAL;
                }
                data = MapViewOfFile(hm,
                        FILE_MAP_ALL_ACCESS,
                        0, 0,
                        buf_nbuffers * buf_bufferSize);
                if (data == NULL) {
                        if (hf != INVALID_HANDLE_VALUE) CloseHandle(hf);
                        CloseHandle(hm);
                        return CM_ERROR_INVAL;
                }
                CloseHandle(hm);
#else
                /* djgpp doesn't support memory mapped files */
                data = malloc(buf_nbuffers * buf_bufferSize);
#endif /* !DJGPP */

                /* create buffer headers and put in free list */
                bp = malloc(buf_nbuffers * sizeof(cm_buf_t));
                buf_allp = NULL;
                for(i=0; i<buf_nbuffers; i++) {
                        /* allocate and zero some storage */
                        memset(bp, 0, sizeof(cm_buf_t));

                        /* thread on list of all buffers */
                        bp->allp = buf_allp;
                        buf_allp = bp;

                        osi_QAdd((osi_queue_t **)&buf_freeListp, &bp->q);
                        bp->flags |= CM_BUF_INLRU;
                        lock_InitializeMutex(&bp->mx, "Buffer mutex");

                        /* grab appropriate number of bytes from aligned zone */
                        bp->datap = data;

                        /* setup last buffer pointer */
                        if (i == 0)
                                buf_freeListEndp = bp;

                        /* next */
                        bp++;
                        data += buf_bufferSize;
                }

                /* none reserved at first */
                buf_reservedBufs = 0;

                /* just for safety's sake */
                buf_maxReservedBufs = buf_nbuffers - 3;

                /* init the buffer trace log */
                buf_logp = osi_LogCreate("buffer", 10);

                osi_EndOnce(&once);

                /* and create the incr-syncer */
                phandle = thrd_Create(0, 0,
                                      (ThreadFunc) buf_IncrSyncer, 0, 0, &pid,
                                      "buf_IncrSyncer");

                osi_assertx(phandle != NULL, "buf: can't create incremental sync proc");
#ifndef DJGPP
                CloseHandle(phandle);
#endif /* !DJGPP */
        }

        return 0;
}

/* add nbuffers to the buffer pool, if possible.
 * Called with no locks held.
 */
long buf_AddBuffers(long nbuffers)
{
        cm_buf_t *bp;
        int i;
        char *data;
#ifndef DJGPP
        HANDLE hm;
        long cs;

        afsi_log("%d buffers being added to the existing cache of size %d",
                 nbuffers, buf_nbuffers);

        if (buf_cacheType == CM_BUF_CACHETYPE_VIRTUAL) {
                /* The size of a virtual cache cannot be changed after it has
                 * been created.  Subsequent calls to MapViewOfFile() with
                 * an existing mapping object name would not allow the
                 * object to be resized.  Return failure immediately.
                 */
                return CM_ERROR_INVAL;
        }

        /*
         * Cache file mapping constrained by
         * system allocation granularity;
         * round up, assuming granularity is a power of two;
         * assume existing cache size is already rounded
         */
        cs = nbuffers * buf_bufferSize;
        cs = (cs + (sysInfo.dwAllocationGranularity - 1))
                & ~(sysInfo.dwAllocationGranularity - 1);
        if (cs != nbuffers * buf_bufferSize) {
                nbuffers = cs / buf_bufferSize;
        }

        /* Reserve additional buffer space by remapping cache file */
        hm = CreateFileMapping(CacheHandle,
                NULL,
                PAGE_READWRITE,
                0, (buf_nbuffers + nbuffers) * buf_bufferSize,
                NULL);
        if (hm == NULL) {
                if (GetLastError() == ERROR_DISK_FULL)
                        return CM_ERROR_TOOMANYBUFS;
                else
                        return CM_ERROR_INVAL;
        }
        data = MapViewOfFile(hm,
                FILE_MAP_ALL_ACCESS,
                0, buf_nbuffers * buf_bufferSize,
                nbuffers * buf_bufferSize);
        if (data == NULL) {
                CloseHandle(hm);
                return CM_ERROR_INVAL;
        }
        CloseHandle(hm);
#else
        /* allocate space for the new buffers only, not the whole cache */
        data = malloc(nbuffers * buf_bufferSize);
#endif /* !DJGPP */

        /* Create buffer headers and put in free list */
        bp = malloc(nbuffers * sizeof(*bp));

        for(i=0; i<nbuffers; i++) {
                memset(bp, 0, sizeof(*bp));

                lock_InitializeMutex(&bp->mx, "cm_buf_t");

                /* grab appropriate number of bytes from aligned zone */
                bp->datap = data;

                bp->flags |= CM_BUF_INLRU;

                lock_ObtainWrite(&buf_globalLock);
                /* note that buf_allp chain is covered by buf_globalLock now */
                bp->allp = buf_allp;
                buf_allp = bp;
                osi_QAdd((osi_queue_t **) &buf_freeListp, &bp->q);
                if (!buf_freeListEndp) buf_freeListEndp = bp;
                buf_nbuffers++;
                lock_ReleaseWrite(&buf_globalLock);

                bp++;
                data += buf_bufferSize;
        }        /* for loop over all buffers */

        return 0;
}

/* interface to set the number of buffers to an exact figure.
 * Called with no locks held.
 */
long buf_SetNBuffers(long nbuffers)
{
        if (nbuffers < 10)
                return CM_ERROR_INVAL;
        if (nbuffers == buf_nbuffers)
                return 0;
        else if (nbuffers > buf_nbuffers)
                return buf_AddBuffers(nbuffers - buf_nbuffers);
        else
                return CM_ERROR_INVAL;
}

/* release a buffer.  Buffer must be referenced, but unlocked. */
void buf_Release(cm_buf_t *bp)
{
        lock_ObtainWrite(&buf_globalLock);
        buf_LockedRelease(bp);
        lock_ReleaseWrite(&buf_globalLock);
}

/* wait for reading or writing to clear; called with write-locked
 * buffer, and returns with locked buffer.
 */
void buf_WaitIO(cm_buf_t *bp)
{
        while (1) {
                /* if no IO is happening, we're done */
                if (!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)))
                        break;

                /* otherwise I/O is happening, but some other thread is waiting for
                 * the I/O already.  Wait for that guy to figure out what happened,
                 * and then check again.
                 */
                if (bp->flags & CM_BUF_WAITING)
                        osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING already set for 0x%x", bp);

                bp->flags |= CM_BUF_WAITING;
                osi_SleepM((long) bp, &bp->mx);
                lock_ObtainMutex(&bp->mx);
                osi_Log1(buf_logp, "buf_WaitIO conflict wait done for 0x%x", bp);
        }

        /* if we get here, the IO is done, but we may have to wakeup people waiting for
         * the I/O to complete.  Do so.
         */
        if (bp->flags & CM_BUF_WAITING) {
                bp->flags &= ~CM_BUF_WAITING;
                osi_Wakeup((long) bp);
        }
        osi_Log1(buf_logp, "WaitIO finished wait for bp 0x%x", (long) bp);
}

/* code to drop reference count while holding buf_globalLock */
void buf_LockedRelease(cm_buf_t *bp)
{
        /* ensure that we're in the LRU queue if our ref count is 0 */
        osi_assert(bp->refCount > 0);
        if (--bp->refCount == 0) {
                if (!(bp->flags & CM_BUF_INLRU)) {
                        osi_QAdd((osi_queue_t **) &buf_freeListp, &bp->q);

                        /* watch for transition from empty to one element */
                        if (!buf_freeListEndp)
                                buf_freeListEndp = buf_freeListp;
                        bp->flags |= CM_BUF_INLRU;
                }
        }
}

/* find a buffer, if any, for a particular file ID and offset.  Assumes
 * that buf_globalLock is write locked when called.
 */
cm_buf_t *buf_LockedFind(struct cm_scache *scp, osi_hyper_t *offsetp)
{
        long i;
        cm_buf_t *bp;

        i = BUF_HASH(&scp->fid, offsetp);
        for(bp = buf_hashTablepp[i]; bp; bp=bp->hashp) {
                if (cm_FidCmp(&scp->fid, &bp->fid) == 0
                        && offsetp->LowPart == bp->offset.LowPart
                        && offsetp->HighPart == bp->offset.HighPart) {
                        bp->refCount++;
                        break;
                }
        }

        /* return whatever we found, if anything */
        return bp;
}

/* find a buffer with offset *offsetp for vnode *scp.  Called
 * with no locks held.
 */
cm_buf_t *buf_Find(struct cm_scache *scp, osi_hyper_t *offsetp)
{
        cm_buf_t *bp;

        lock_ObtainWrite(&buf_globalLock);
        bp = buf_LockedFind(scp, offsetp);
        lock_ReleaseWrite(&buf_globalLock);

        return bp;
}

/* start cleaning I/O on this buffer.  Buffer must be write locked, and is returned
 * write-locked.
 *
 * Makes sure that there's only one person writing this block
 * at any given time, and also ensures that the log is forced sufficiently far,
 * if this buffer contains logged data.
 */
void buf_LockedCleanAsync(cm_buf_t *bp, cm_req_t *reqp)
{
        long code;

        code = 0;
        while ((bp->flags & (CM_BUF_WRITING | CM_BUF_DIRTY)) == CM_BUF_DIRTY) {
                lock_ReleaseMutex(&bp->mx);

                code = (*cm_buf_opsp->Writep)(&bp->fid, &bp->offset,
                                                buf_bufferSize, 0, bp->userp,
                                                reqp);

                lock_ObtainMutex(&bp->mx);
                if (code) break;

#ifdef DISKCACHE95
                /* Disk cache support */
                /* write buffer to disk cache (synchronous for now) */
                diskcache_Update(bp->dcp, bp->datap, buf_bufferSize, bp->dataVersion);
#endif /* DISKCACHE95 */
        }

        /* do logging after the call to GetLastError, or else the log call
         * itself may clobber the error code.
         */
        osi_Log2(buf_logp, "buf_CleanAsync starts I/O on 0x%x, done=%d", bp, code);

        /* if someone was waiting for the I/O that just completed or failed,
         * wake them up.
         */
        if (bp->flags & CM_BUF_WAITING) {
                /* turn off flags and wakeup users */
                bp->flags &= ~CM_BUF_WAITING;
                osi_Wakeup((long) bp);
        }
}

/* Called with a zero-ref count buffer and with the buf_globalLock write locked.
 * recycles the buffer, and leaves it ready for reuse with a ref count of 1.
 * The buffer must already be clean, and no I/O should be happening to it.
 */
void buf_Recycle(cm_buf_t *bp)
{
        int i;
        cm_buf_t **lbpp;
        cm_buf_t *tbp;
        cm_buf_t *prevBp, *nextBp;

        /* if we get here, we know that the buffer still has a 0 ref count,
         * and that it is clean and has no currently pending I/O.  This is
         * the dude to return.
         * Remember that as long as the ref count is 0, we know that we won't
         * have any lock conflicts, so we can grab the buffer lock out of
         * order in the locking hierarchy.
         */
        osi_Log2( buf_logp, "buf_Recycle recycles 0x%x, off 0x%x",
                  bp, bp->offset.LowPart);

        osi_assert(bp->refCount == 0);
        osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY)));
        lock_AssertWrite(&buf_globalLock);

        if (bp->flags & CM_BUF_INHASH) {
                /* Remove from hash */

                i = BUF_HASH(&bp->fid, &bp->offset);
                lbpp = &(buf_hashTablepp[i]);
                for(tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = *lbpp) {
                        if (tbp == bp) break;
                }

                /* we better find it */
                osi_assertx(tbp != NULL, "buf_Recycle: hash table screwup");

                *lbpp = bp->hashp;      /* hash out */

                /* Remove from file hash */

                i = BUF_FILEHASH(&bp->fid);
                prevBp = bp->fileHashBackp;
                nextBp = bp->fileHashp;
                if (prevBp)
                        prevBp->fileHashp = nextBp;
                else
                        buf_fileHashTablepp[i] = nextBp;
                if (nextBp)
                        nextBp->fileHashBackp = prevBp;

                bp->flags &= ~CM_BUF_INHASH;
        }

        /* bump the soft reference counter now, to invalidate softRefs; no
         * wakeup is required since people don't sleep waiting for this
         * counter to change.
         */
        bp->idCounter++;

        /* make the fid unrecognizable */
        memset(&bp->fid, 0, sizeof(bp->fid));
}

/* recycle a buffer, removing it from the free list, hashing in its new identity
 * and returning it write-locked so that no one can use it.  Called without
 * any locks held, and can return an error if it loses the race condition and
 * finds that someone else created the desired buffer.
 *
 * If success is returned, the buffer is returned write-locked.
 *
 * May be called with null scp and offsetp, if we're just trying to reclaim some
 * space from the buffer pool.  In that case, the buffer will be returned
 * without being hashed into the hash table.
 */
long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
        cm_buf_t *bp;           /* buffer we're dealing with */
        cm_buf_t *nextBp;       /* next buffer in file hash chain */
        long i;                 /* temp */
        cm_req_t req;

        cm_InitReq(&req);       /* just in case */

        while(1) {
retry:
                lock_ObtainWrite(&buf_globalLock);
                /* check to see if we lost the race */
                if (scp) {
                        if ((bp = buf_LockedFind(scp, offsetp))) {
                                bp->refCount--;
                                lock_ReleaseWrite(&buf_globalLock);
                                return CM_BUF_EXISTS;
                        }
                }

                /* for debugging, assert free list isn't empty, although we
                 * really should try waiting for a running transaction to finish
                 * instead of this; or better, we should have a transaction
                 * throttler prevent us from entering this situation.
                 */
                osi_assertx(buf_freeListEndp != NULL, "buf_GetNewLocked: no free buffers");

                /* look at all buffers in free list, some of which may temp.
                 * have high refcounts and which then should be skipped,
                 * starting cleaning I/O for those which are dirty.  If we find
                 * a clean buffer, we rehash it, lock it and return it.
                 */
                for(bp = buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
                        /* check to see if it really has zero ref count.  This
                         * code can bump refcounts, at least, so it may not be
                         * zero.
                         */
                        if (bp->refCount > 0) continue;

                        /* we don't have to lock buffer itself, since the ref
                         * count is 0 and we know it will stay zero as long as
                         * we hold the global lock.
                         */

                        /* don't recycle someone in our own chunk; guard for the
                         * null-scp reclaim case, where there is no chunk of our
                         * own to protect.
                         */
                        if (scp && !cm_FidCmp(&bp->fid, &scp->fid)
                            && (bp->offset.LowPart & (-cm_chunkSize))
                                  == (offsetp->LowPart & (-cm_chunkSize)))
                                continue;

                        /* if this page is being filled (!) or cleaned, see if
                         * the I/O has completed.  If not, skip it, otherwise
                         * do the final processing for the I/O.
                         */
                        if (bp->flags & (CM_BUF_READING | CM_BUF_WRITING)) {
                                /* probably shouldn't do this much work while
                                 * holding the big lock?  Watch for contention
                                 * here.
                                 */
                                continue;
                        }

                        if (bp->flags & CM_BUF_DIRTY) {
                                /* if the buffer is dirty, start cleaning it and
                                 * move on to the next buffer.  We do this with
                                 * just the lock required to minimize contention
                                 * on the big lock.
                                 */
                                bp->refCount++;
                                lock_ReleaseWrite(&buf_globalLock);

                                /* grab required lock and clean; this only
                                 * starts the I/O.  By the time we're back,
                                 * it'll still be marked dirty, but it will also
                                 * have the WRITING flag set, so we won't get
                                 * back here.
                                 */
                                buf_CleanAsync(bp, &req);

                                /* now put it back and go around again */
                                buf_Release(bp);
                                goto retry;
                        }

                        /* if we get here, we know that the buffer still has a 0
                         * ref count, and that it is clean and has no currently
                         * pending I/O.  This is the dude to return.
                         * Remember that as long as the ref count is 0, we know
                         * that we won't have any lock conflicts, so we can grab
                         * the buffer lock out of order in the locking hierarchy.
                         */
                        buf_Recycle(bp);

                        /* clean up junk flags */
                        bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR);
                        bp->dataVersion = -1;   /* unknown so far */

                        /* now hash in as our new buffer, and give it the
                         * appropriate label, if requested.
                         */
                        if (scp) {
                                bp->flags |= CM_BUF_INHASH;
                                bp->fid = scp->fid;
                                bp->offset = *offsetp;
                                i = BUF_HASH(&scp->fid, offsetp);
                                bp->hashp = buf_hashTablepp[i];
                                buf_hashTablepp[i] = bp;
                                i = BUF_FILEHASH(&scp->fid);
                                nextBp = buf_fileHashTablepp[i];
                                bp->fileHashp = nextBp;
                                bp->fileHashBackp = NULL;
                                if (nextBp)
                                        nextBp->fileHashBackp = bp;
                                buf_fileHashTablepp[i] = bp;
                        }

                        /* prepare to return it.  Start by giving it a good
                         * refcount */
                        bp->refCount = 1;

                        /* and since it has a non-zero ref count, we should move
                         * it from the lru queue.  It better be still there,
                         * since we've held the global (big) lock since we found
                         * it there.
                         */
                        osi_assertx(bp->flags & CM_BUF_INLRU,
                                    "buf_GetNewLocked: LRU screwup");
                        if (buf_freeListEndp == bp) {
                                /* we're the last guy in this queue, so maintain it */
                                buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
                        }
                        osi_QRemove((osi_queue_t **) &buf_freeListp, &bp->q);
                        bp->flags &= ~CM_BUF_INLRU;

                        /* finally, grab the mutex so that people don't use it
                         * before the caller fills it with data.  Again, no one
                         * should have been able to get to this dude to lock it.
                         */
                        osi_assertx(lock_TryMutex(&bp->mx),
                                    "buf_GetNewLocked: TryMutex failed");

                        lock_ReleaseWrite(&buf_globalLock);
                        *bufpp = bp;
                        return 0;
                } /* for all buffers in lru queue */
                lock_ReleaseWrite(&buf_globalLock);
        }       /* while loop over everything */
        /* not reached */
} /* the proc */

/* get a page, returning it held but unlocked.  Doesn't fill in the page
 * with I/O, since we're going to write the whole thing new.
 */
long buf_GetNew(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
        cm_buf_t *bp;
        long code;
        osi_hyper_t pageOffset;
        int created;

        created = 0;
        pageOffset.HighPart = offsetp->HighPart;
        pageOffset.LowPart = offsetp->LowPart & ~(buf_bufferSize-1);
        while (1) {
                lock_ObtainWrite(&buf_globalLock);
                bp = buf_LockedFind(scp, &pageOffset);
                lock_ReleaseWrite(&buf_globalLock);
                if (bp) {
                        /* lock it and break out */
                        lock_ObtainMutex(&bp->mx);
                        break;
                }

                /* otherwise, we have to create a page */
                code = buf_GetNewLocked(scp, &pageOffset, &bp);

                /* check if the buffer was created in a race condition branch.
                 * If so, go around so we can hold a reference to it.
                 */
                if (code == CM_BUF_EXISTS) continue;

                /* something else went wrong */
                if (code != 0) return code;

                /* otherwise, we have a locked buffer that we just created */
                created = 1;
                break;
        } /* big while loop */

        /* wait for reads */
        if (bp->flags & CM_BUF_READING)
                buf_WaitIO(bp);

        /* once it has been read once, we can unlock it and return it, still
         * with its refcount held.
         */
        lock_ReleaseMutex(&bp->mx);
        *bufpp = bp;
        osi_Log3(buf_logp, "buf_GetNew returning bp 0x%x for file 0x%x, offset 0x%x",
                bp, (long) scp, offsetp->LowPart);
        return 0;
}

/* get a page, returning it held but unlocked.  Make sure it is complete */
long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
        cm_buf_t *bp;
        long code;
        osi_hyper_t pageOffset;
        unsigned long tcount;
        int created;
#ifdef DISKCACHE95
        cm_diskcache_t *dcp;
#endif /* DISKCACHE95 */

        created = 0;
        pageOffset.HighPart = offsetp->HighPart;
        pageOffset.LowPart = offsetp->LowPart & ~(buf_bufferSize-1);
        while (1) {
                lock_ObtainWrite(&buf_globalLock);
                bp = buf_LockedFind(scp, &pageOffset);
                lock_ReleaseWrite(&buf_globalLock);
                if (bp) {
#ifdef DISKCACHE95
                        /* touch disk chunk to update LRU info; this must run
                         * before the break below, where it was previously
                         * unreachable.
                         */
                        diskcache_Touch(bp->dcp);
#endif /* DISKCACHE95 */
                        /* lock it and break out */
                        lock_ObtainMutex(&bp->mx);
                        break;
                }

                /* otherwise, we have to create a page */
                code = buf_GetNewLocked(scp, &pageOffset, &bp);

                /* check if the buffer was created in a race condition branch.
                 * If so, go around so we can hold a reference to it.
                 */
                if (code == CM_BUF_EXISTS) continue;

                /* something else went wrong */
                if (code != 0) return code;

                /* otherwise, we have a locked buffer that we just created */
                created = 1;
                break;
        } /* big while loop */

        /* if we get here, we have a locked buffer that may have just been
         * created, in which case it needs to be filled with data.
         */
        if (created) {
                /* load the page; freshly created pages should be idle */
                osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)));

                /* setup offset, event */
#ifndef DJGPP  /* doesn't seem to be used */
                bp->over.Offset = bp->offset.LowPart;
                bp->over.OffsetHigh = bp->offset.HighPart;
#endif /* !DJGPP */

                /* start the I/O; may drop lock */
                bp->flags |= CM_BUF_READING;
                code = (*cm_buf_opsp->Readp)(bp, buf_bufferSize, &tcount, NULL);

#ifdef DISKCACHE95
                code = diskcache_Get(&bp->fid, &bp->offset, bp->datap, buf_bufferSize, &bp->dataVersion, &tcount, &dcp);
                bp->dcp = dcp;    /* pointer to disk cache struct. */
#endif /* DISKCACHE95 */

                if (code != 0) {
                        /* failure or queued */
#ifndef DJGPP   /* cm_bufRead always returns 0 */
                        if (code != ERROR_IO_PENDING) {
#endif
                                bp->error = code;
                                bp->flags |= CM_BUF_ERROR;
                                bp->flags &= ~CM_BUF_READING;
                                if (bp->flags & CM_BUF_WAITING) {
                                        bp->flags &= ~CM_BUF_WAITING;
                                        osi_Wakeup((long) bp);
                                }
                                lock_ReleaseMutex(&bp->mx);
                                buf_Release(bp);
                                return code;
#ifndef DJGPP
                        }
#endif
                } else {
                        /* otherwise, I/O completed instantly and we're done, except
                         * for padding the xfr out with 0s and checking for EOF
                         */
                        if (tcount < (unsigned long) buf_bufferSize) {
                                memset(bp->datap+tcount, 0, buf_bufferSize - tcount);
                                if (tcount == 0)
                                        bp->flags |= CM_BUF_EOF;
                        }
                        bp->flags &= ~CM_BUF_READING;
                        if (bp->flags & CM_BUF_WAITING) {
                                bp->flags &= ~CM_BUF_WAITING;
                                osi_Wakeup((long) bp);
                        }
                }

        } /* if created */

        /* wait for reads, either that which we started above, or that someone
         * else started.  We don't care if we return a buffer being cleaned.
         */
        if (bp->flags & CM_BUF_READING)
                buf_WaitIO(bp);

        /* once it has been read once, we can unlock it and return it, still
         * with its refcount held.
         */
        lock_ReleaseMutex(&bp->mx);
        *bufpp = bp;

        /* now remove from queue; will be put in at the head (farthest from
         * being recycled) when we're done in buf_Release.
         */
        lock_ObtainWrite(&buf_globalLock);
        if (bp->flags & CM_BUF_INLRU) {
                if (buf_freeListEndp == bp)
                        buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
                osi_QRemove((osi_queue_t **) &buf_freeListp, &bp->q);
                bp->flags &= ~CM_BUF_INLRU;
        }
        lock_ReleaseWrite(&buf_globalLock);

        osi_Log3(buf_logp, "buf_Get returning bp 0x%x for file 0x%x, offset 0x%x",
                bp, (long) scp, offsetp->LowPart);
        return 0;
}

/* count # of elements in the free list;
 * we don't bother doing the proper locking for accessing dataVersion or flags
 * since it is a pain, and this is really just an advisory call.  If you need
 * to do better at some point, rewrite this function.
 */
long buf_CountFreeList(void)
{
        long count;
        cm_buf_t *bufp;

        count = 0;
        lock_ObtainRead(&buf_globalLock);
        for(bufp = buf_freeListp; bufp; bufp = (cm_buf_t *) osi_QNext(&bufp->q)) {
                /* if the buffer doesn't have an identity, or if the buffer
                 * has been invalidated (by having its DV stomped upon), then
                 * count it as free, since it isn't really being utilized.
                 */
                if (!(bufp->flags & CM_BUF_INHASH) || bufp->dataVersion <= 0)
                        count++;
        }
        lock_ReleaseRead(&buf_globalLock);
        return count;
}

/* clean a buffer by starting its write; despite the name, use buf_CleanWait
 * if you need to wait for the I/O to finish.
 */
void buf_CleanAsync(cm_buf_t *bp, cm_req_t *reqp)
{
        lock_ObtainMutex(&bp->mx);
        buf_LockedCleanAsync(bp, reqp);
        lock_ReleaseMutex(&bp->mx);
}

/* wait for a buffer's cleaning to finish */
void buf_CleanWait(cm_buf_t *bp)
{
        lock_ObtainMutex(&bp->mx);
        if (bp->flags & CM_BUF_WRITING) {
                buf_WaitIO(bp);
        }
        lock_ReleaseMutex(&bp->mx);
}

/* set the dirty flag on a buffer, and set associated write-ahead log,
 * if there is one.  Allow one to be added to a buffer, but not changed.
 *
 * The buffer must be locked before calling this routine.
 */
void buf_SetDirty(cm_buf_t *bp)
{
        osi_assert(bp->refCount > 0);

        osi_Log1(buf_logp, "buf_SetDirty 0x%x", bp);

        /* set dirty bit */
        bp->flags |= CM_BUF_DIRTY;

        /* and turn off EOF flag, since it has associated data now */
        bp->flags &= ~CM_BUF_EOF;
}
1111
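/* Illustrative sketch (disabled) of the write path implied by the comment
 * above: the buffer's mutex must be held across the data update and the
 * buf_SetDirty call.  fillPage() is a hypothetical producer; only the buf_*
 * and lock_* calls are part of this package.
 */
#if 0
static long buf_ExampleDirty(cm_scache_t *scp, osi_hyper_t *offsetp)
{
        cm_buf_t *bufp;
        long code;

        code = buf_GetNew(scp, offsetp, &bufp); /* held, unlocked; no read I/O */
        if (code)
                return code;

        lock_ObtainMutex(&bufp->mx);
        fillPage(bufp->datap, buf_bufferSize);  /* hypothetical producer */
        buf_SetDirty(bufp);                     /* requires bufp->mx held */
        lock_ReleaseMutex(&bufp->mx);

        buf_Release(bufp);                      /* incr syncer will write it out */
        return 0;
}
#endif
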
/* clean all buffers, reset log pointers and invalidate all buffers.
 * Called with no locks held, and returns with same.
 *
 * This function is guaranteed to clean and remove the log ptr of all the
 * buffers that were dirty or had non-zero log ptrs before the call was
 * made.  That's sufficient to clean up any garbage left around by recovery,
 * which is all we're counting on this for; there may be newly created buffers
 * added while we're running, but that should be OK.
 *
 * In an environment where there are no transactions (artificially imposed, for
 * example, when switching the database to raw mode), this function is used to
 * make sure that all updates have been written to the disk.  In that case, we don't
 * really require that we forget the log association between pages and logs, but
 * it also doesn't hurt.  Since raw mode I/O goes through this buffer package, we don't
 * have to worry about invalidating data in the buffers.
 *
 * This function is used at the end of recovery as paranoia to get the recovered
 * database out to disk.  It removes all references to the recovery log and cleans
 * all buffers.
 */
long buf_CleanAndReset(void)
{
        long i;
        cm_buf_t *bp;
        cm_req_t req;

        lock_ObtainWrite(&buf_globalLock);
        for(i=0; i<buf_hashSize; i++) {
                for(bp = buf_hashTablepp[i]; bp; bp = bp->hashp) {
                        bp->refCount++;
                        lock_ReleaseWrite(&buf_globalLock);

                        /* now no locks are held; clean buffer and go on */
                        cm_InitReq(&req);
                        buf_CleanAsync(bp, &req);
                        buf_CleanWait(bp);

                        /* relock and release buffer */
                        lock_ObtainWrite(&buf_globalLock);
                        buf_LockedRelease(bp);
                } /* over one bucket */
        }       /* for loop over all hash buckets */

        /* release locks */
        lock_ReleaseWrite(&buf_globalLock);

        /* and we're done */
        return 0;
}

/* called without global lock being held, reserves buffers for callers
 * that need more than one held (not locked) at once.
 */
void buf_ReserveBuffers(long nbuffers)
{
        lock_ObtainWrite(&buf_globalLock);
        while (1) {
                if (buf_reservedBufs + nbuffers > buf_maxReservedBufs) {
                        buf_reserveWaiting = 1;
                        osi_Log1(buf_logp, "buf_ReserveBuffers waiting for %d bufs", nbuffers);
                        osi_SleepW((long) &buf_reservedBufs, &buf_globalLock);
                        lock_ObtainWrite(&buf_globalLock);
                }
                else {
                        buf_reservedBufs += nbuffers;
                        break;
                }
        }
        lock_ReleaseWrite(&buf_globalLock);
}

int buf_TryReserveBuffers(long nbuffers)
{
        int code;

        lock_ObtainWrite(&buf_globalLock);
        if (buf_reservedBufs + nbuffers > buf_maxReservedBufs) {
                code = 0;
        }
        else {
                buf_reservedBufs += nbuffers;
                code = 1;
        }
        lock_ReleaseWrite(&buf_globalLock);
        return code;
}

/* called without global lock held, releases reservation held by
 * buf_ReserveBuffers.
 */
void buf_UnreserveBuffers(long nbuffers)
{
        lock_ObtainWrite(&buf_globalLock);
        buf_reservedBufs -= nbuffers;
        if (buf_reserveWaiting) {
                buf_reserveWaiting = 0;
                osi_Wakeup((long) &buf_reservedBufs);
        }
        lock_ReleaseWrite(&buf_globalLock);
}
1212
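/* A short sketch (disabled) of the reservation protocol above: a caller that
 * needs several buffers held at once reserves slots first so the pool cannot
 * be exhausted mid-operation.  The count of 4 and the work in the middle are
 * arbitrary placeholders.
 */
#if 0
static void buf_ExampleReserve(void)
{
        buf_ReserveBuffers(4);          /* may sleep until 4 slots are free */

        /* ... buf_Get up to 4 buffers and operate on them here ... */

        buf_UnreserveBuffers(4);        /* wake anyone waiting on the pool */
}
#endif
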
/* truncate the buffers past sizep, zeroing out the page, if we don't
 * end on a page boundary.
 *
 * Requires cm_bufCreateLock to be write locked.
 */
long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
        osi_hyper_t *sizep)
{
        cm_buf_t *bufp;
        cm_buf_t *nbufp;                        /* next buffer, if didRelease */
        osi_hyper_t bufEnd;
        long code;
        long bufferPos;
        int didRelease;
        long i;

        /* assert that cm_bufCreateLock is held in write mode */
        lock_AssertWrite(&scp->bufCreateLock);

        i = BUF_FILEHASH(&scp->fid);

        lock_ObtainWrite(&buf_globalLock);
        bufp = buf_fileHashTablepp[i];
        if (bufp == NULL) {
                lock_ReleaseWrite(&buf_globalLock);
                return 0;
        }

        bufp->refCount++;
        lock_ReleaseWrite(&buf_globalLock);
        for(; bufp; bufp = nbufp) {
                didRelease = 0;
                lock_ObtainMutex(&bufp->mx);

                bufEnd.HighPart = 0;
                bufEnd.LowPart = buf_bufferSize;
                bufEnd = LargeIntegerAdd(bufEnd, bufp->offset);

                if (cm_FidCmp(&bufp->fid, &scp->fid) == 0 &&
                        LargeIntegerLessThan(*sizep, bufEnd)) {
                        buf_WaitIO(bufp);
                }
                lock_ObtainMutex(&scp->mx);

                /* make sure we have a callback (so we have the right value for
                 * the length), and wait for it to be safe to do a truncate.
                 */
                code = cm_SyncOp(scp, bufp, userp, reqp, 0,
                                 CM_SCACHESYNC_NEEDCALLBACK
                                 | CM_SCACHESYNC_GETSTATUS
                                 | CM_SCACHESYNC_SETSIZE
                                 | CM_SCACHESYNC_BUFLOCKED);
                /* if we succeeded in our locking, and this applies to the right
                 * file, and the truncate request overlaps the buffer either
                 * totally or partially, then do something.
                 */
                if (code == 0 && cm_FidCmp(&bufp->fid, &scp->fid) == 0
                        && LargeIntegerLessThan(*sizep, bufEnd)) {

                        lock_ObtainWrite(&buf_globalLock);

                        /* destroy the buffer, turning off its dirty bit, if
                         * we're truncating the whole buffer.  Otherwise, set
                         * the dirty bit, and clear out the tail of the buffer
                         * if we just overlap some.
                         */
                        if (LargeIntegerLessThanOrEqualTo(*sizep, bufp->offset)) {
                                /* truncating the entire page */
                                bufp->flags &= ~CM_BUF_DIRTY;
                                bufp->dataVersion = -1; /* known bad */
                                bufp->dirtyCounter++;
                        }
                        else {
                                /* don't set dirty, since dirty implies
                                 * currently up-to-date.  Don't need to do this,
                                 * since we'll update the length anyway.
                                 *
                                 * Zero out remainder of the page, in case we
                                 * seek and write past EOF, and make this data
                                 * visible again.
                                 */
                                bufferPos = sizep->LowPart & (buf_bufferSize - 1);
                                osi_assert(bufferPos != 0);
                                memset(bufp->datap + bufferPos, 0,
                                        buf_bufferSize - bufferPos);
                        }

                        lock_ReleaseWrite(&buf_globalLock);
                }

                lock_ReleaseMutex(&scp->mx);
                lock_ReleaseMutex(&bufp->mx);
                if (!didRelease) {
                        lock_ObtainWrite(&buf_globalLock);
                        nbufp = bufp->fileHashp;
                        if (nbufp) nbufp->refCount++;
                        buf_LockedRelease(bufp);
                        lock_ReleaseWrite(&buf_globalLock);
                }

                /* bail out early if we fail */
                if (code) {
                        /* at this point, nbufp is held; bufp has already been
                         * released.
                         */
                        if (nbufp) buf_Release(nbufp);
                        return code;
                }
        }

        /* success */
        return 0;
}

long buf_FlushCleanPages(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
{
        long code;
        cm_buf_t *bp;           /* buffer we're hacking on */
        cm_buf_t *nbp;
        int didRelease;
        long i;

        i = BUF_FILEHASH(&scp->fid);

        code = 0;
        lock_ObtainWrite(&buf_globalLock);
        bp = buf_fileHashTablepp[i];
        if (bp) bp->refCount++;
        lock_ReleaseWrite(&buf_globalLock);
        for(; bp; bp = nbp) {
                didRelease = 0; /* haven't released this buffer yet */

                /* clean buffer synchronously */
                if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
                        lock_ObtainMutex(&bp->mx);

                        /* start cleaning the buffer, and wait for it to finish */
                        buf_LockedCleanAsync(bp, reqp);
                        buf_WaitIO(bp);
                        lock_ReleaseMutex(&bp->mx);

                        code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
                        if (code) goto skip;

                        lock_ObtainWrite(&buf_globalLock);
                        /* actually, we only know that buffer is clean if ref
                         * count is 1, since we don't have buffer itself locked.
                         */
                        if (!(bp->flags & CM_BUF_DIRTY)) {
                                if (bp->refCount == 1) {        /* bp is held above */
                                        buf_LockedRelease(bp);
                                        nbp = bp->fileHashp;
                                        if (nbp) nbp->refCount++;
                                        didRelease = 1;
                                        buf_Recycle(bp);
                                }
                        }
                        lock_ReleaseWrite(&buf_globalLock);

                        (*cm_buf_opsp->Unstabilizep)(scp, userp);
                }

skip:
                if (!didRelease) {
                        lock_ObtainWrite(&buf_globalLock);
                        if ((nbp = bp->fileHashp)) nbp->refCount++;
                        buf_LockedRelease(bp);
                        lock_ReleaseWrite(&buf_globalLock);
                }
        }       /* for loop over a bunch of buffers */

        /* done */
        return code;
}

long buf_CleanVnode(struct cm_scache *scp, cm_user_t *userp, cm_req_t *reqp)
{
        long code;
        cm_buf_t *bp;           /* buffer we're hacking on */
        cm_buf_t *nbp;          /* next one */
        long i;

        i = BUF_FILEHASH(&scp->fid);

        code = 0;
        lock_ObtainWrite(&buf_globalLock);
        bp = buf_fileHashTablepp[i];
        if (bp) bp->refCount++;
        lock_ReleaseWrite(&buf_globalLock);
        for(; bp; bp = nbp) {
                /* clean buffer synchronously */
                if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
                        if (userp) {
                                cm_HoldUser(userp);
                                lock_ObtainMutex(&bp->mx);
                                if (bp->userp)
                                        cm_ReleaseUser(bp->userp);
                                bp->userp = userp;
                                lock_ReleaseMutex(&bp->mx);
                        }
                        buf_CleanAsync(bp, reqp);
                        buf_CleanWait(bp);
                        lock_ObtainMutex(&bp->mx);
                        if (bp->flags & CM_BUF_ERROR) {
                                if (code == 0 || code == -1) code = bp->error;
                                if (code == 0) code = -1;
                        }
                        lock_ReleaseMutex(&bp->mx);
                }

                lock_ObtainWrite(&buf_globalLock);
                buf_LockedRelease(bp);
                nbp = bp->fileHashp;
                if (nbp) nbp->refCount++;
                lock_ReleaseWrite(&buf_globalLock);
        }       /* for loop over a bunch of buffers */

        /* done */
        return code;
}

/* dump the contents of the buf_hashTablepp. */
int cm_DumpBufHashTable(FILE *outputFile, char *cookie)
{
        DWORD zilch;    /* WriteFile wants a DWORD count of bytes written */
        cm_buf_t *bp;
        char output[1024];
        int i;

        if (buf_hashTablepp == NULL)
                return -1;

        lock_ObtainRead(&buf_globalLock);

        sprintf(output, "%s - dumping buf_HashTable - buf_hashSize=%d\n", cookie, buf_hashSize);
        WriteFile(outputFile, output, strlen(output), &zilch, NULL);

        for (i = 0; i < buf_hashSize; i++)
        {
                for(bp = buf_hashTablepp[i]; bp; bp=bp->hashp)
                {
                        if (bp->refCount)
                        {
                                sprintf(output, "%s bp=0x%08X, hash=%d, fid (cell=%d, volume=%d, "
                                        "vnode=%d, unique=%d), size=%d refCount=%d\n",
                                        cookie, (void *)bp, i, bp->fid.cell, bp->fid.volume,
                                        bp->fid.vnode, bp->fid.unique, bp->size, bp->refCount);
                                WriteFile(outputFile, output, strlen(output), &zilch, NULL);
                        }
                }
        }

        sprintf(output, "%s - Done dumping buf_HashTable.\n", cookie);
        WriteFile(outputFile, output, strlen(output), &zilch, NULL);

        lock_ReleaseRead(&buf_globalLock);
        return 0;
}