/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/* Copyright (C) 1994 Cazamar Systems, Inc. */

#include <afs/param.h>
#include <afs/stds.h>

#ifndef DJGPP
#include <windows.h>
#endif
#include <osi.h>
#include <malloc.h>
#include <stdio.h>
#include <assert.h>
#include <winnt/osi_malloc.h>

#include "afsd.h"

void afsi_log();

/* This module implements the buffer package used by the local transaction
 * system (cm).  It is initialized by calling cm_Init, which calls buf_Init;
 * it must be initialized before any of its main routines are called.
 *
 * Each buffer is hashed into a hash table by file ID and offset, and if its
 * reference count is zero, it is also in a free list.
 *
 * There are two locks involved in buffer processing.  The global lock
 * buf_globalLock protects all of the global variables defined in this module,
 * the reference counts and hash pointers in the actual cm_buf_t structures,
 * and the LRU queue pointers in the buffer structures.
 *
 * The mutexes in the buffer structures protect the remaining fields in the
 * buffers, as well as the data itself.
 *
 * The locking hierarchy here is this:
 *
 * - resv multiple simul. buffers reservation
 * - lock buffer I/O flags
 * - lock buffer's mutex
 * - lock buf_globalLock
 */
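
/* For example (illustrative only), a thread already holding a buffer's mutex
 * that wants to drop its reference follows the hierarchy by taking
 * buf_globalLock after the mutex, never the other way around:
 *
 *      lock_ObtainMutex(&bp->mx);
 *      ...
 *      lock_ObtainWrite(&buf_globalLock);
 *      buf_LockedRelease(bp);
 *      lock_ReleaseWrite(&buf_globalLock);
 *      lock_ReleaseMutex(&bp->mx);
 */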

/* global debugging log */
osi_log_t *buf_logp = NULL;

/* Global lock protecting hash tables and free lists */
osi_rwlock_t buf_globalLock;

/* ptr to head of the free list (most recently used) and the
 * tail (the guy to remove first).  We use osi_Q* functions
 * to put stuff in buf_freeListp, and maintain the end
 * pointer manually
 */
cm_buf_t *buf_freeListp;
cm_buf_t *buf_freeListEndp;

/* a pointer to a list of all buffers, just so that we can find them
 * easily for debugging, and for the incr syncer.  Locked under
 * the global lock.
 */
cm_buf_t *buf_allp;

/* defaults setup; these variables may be assigned manually
 * before calling cm_Init, as a way of changing these defaults.
 */
long buf_nbuffers = CM_BUF_BUFFERS;
long buf_nOrigBuffers;
long buf_bufferSize = CM_BUF_SIZE;
long buf_hashSize = CM_BUF_HASHSIZE;

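/* Example (sketch, values arbitrary): configuration code that wants a
 * different cache geometry assigns these before cm_Init runs:
 *
 *      buf_nbuffers = 10000;           (bigger cache)
 *      buf_hashSize = 16384;           (buf_Init lowers this to a prime)
 */
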
#ifndef DJGPP
static
HANDLE CacheHandle;

static
SYSTEM_INFO sysInfo;
#endif /* !DJGPP */

/* buffer reservation variables */
long buf_reservedBufs;
long buf_maxReservedBufs;
int buf_reserveWaiting;

/* callouts for reading and writing data, etc */
cm_buf_ops_t *cm_buf_opsp;

/* pointer to hash table; size computed dynamically */
cm_buf_t **buf_hashTablepp;

/* second hash table, indexed by file ID alone */
cm_buf_t **buf_fileHashTablepp;

#ifdef DISKCACHE95
/* for experimental disk caching support in Win95 client */
cm_buf_t *buf_diskFreeListp;
cm_buf_t *buf_diskFreeListEndp;
cm_buf_t *buf_diskAllp;
extern int cm_diskCacheEnabled;
#endif /* DISKCACHE95 */

/* hold a reference to an already held buffer */
void buf_Hold(cm_buf_t *bp)
{
        lock_ObtainWrite(&buf_globalLock);
        bp->refCount++;
        lock_ReleaseWrite(&buf_globalLock);
}

/* incremental sync daemon.  Writes 1/10th of all the buffers every 5000 ms */
void buf_IncrSyncer(long parm)
{
        cm_buf_t *bp;                   /* buffer we're hacking on; held */
        long i;                         /* counter */
        long nAtOnce;                   /* how many to do at once */
        cm_req_t req;

        lock_ObtainWrite(&buf_globalLock);
        bp = buf_allp;
        bp->refCount++;
        lock_ReleaseWrite(&buf_globalLock);
        nAtOnce = buf_nbuffers / 10;
        while (1) {
#ifndef DJGPP
                i = SleepEx(5000, 1);
                if (i != 0) continue;
#else
                thrd_Sleep(5000);
#endif /* DJGPP */

                /* now go through our percentage of the buffers */
                for(i=0; i<nAtOnce; i++) {
                        /* don't want its identity changing while we're
                         * messing with it, so must do all of this with
                         * bp held.
                         */

                        /* start cleaning the buffer; don't touch log pages since
                         * the log code counts on knowing exactly who is writing
                         * a log page at any given instant.
                         */
                        cm_InitReq(&req);
                        req.flags |= CM_REQ_NORETRY;
                        buf_CleanAsync(bp, &req);

                        /* now advance to the next buffer; the allp chain never changes,
                         * and so can be followed even when holding no locks.
                         */
                        lock_ObtainWrite(&buf_globalLock);
                        buf_LockedRelease(bp);
                        bp = bp->allp;
                        if (!bp) bp = buf_allp;
                        bp->refCount++;
                        lock_ReleaseWrite(&buf_globalLock);
                }       /* for loop over a bunch of buffers */
        }               /* whole daemon's while loop */
}

#ifndef DJGPP
/* Create a security attribute structure suitable for use when the cache file
 * is created.  What we mainly want is that only the administrator should be
 * able to do anything with the file.  We create an ACL with only one entry,
 * an entry that grants all rights to the administrator.
 */
PSECURITY_ATTRIBUTES CreateCacheFileSA()
{
        PSECURITY_ATTRIBUTES psa;
        PSECURITY_DESCRIPTOR psd;
        SID_IDENTIFIER_AUTHORITY authority = SECURITY_NT_AUTHORITY;
        PSID AdminSID;
        DWORD AdminSIDlength;
        PACL AdminOnlyACL;
        DWORD ACLlength;

        /* Get Administrator SID */
        AllocateAndInitializeSid(&authority, 2,
                                 SECURITY_BUILTIN_DOMAIN_RID,
                                 DOMAIN_ALIAS_RID_ADMINS,
                                 0, 0, 0, 0, 0, 0,
                                 &AdminSID);

        /* Create Administrator-only ACL */
        AdminSIDlength = GetLengthSid(AdminSID);
        ACLlength = sizeof(ACL) + sizeof(ACCESS_ALLOWED_ACE)
                        + AdminSIDlength - sizeof(DWORD);
        AdminOnlyACL = GlobalAlloc(GMEM_FIXED, ACLlength);
        InitializeAcl(AdminOnlyACL, ACLlength, ACL_REVISION);
        AddAccessAllowedAce(AdminOnlyACL, ACL_REVISION,
                            STANDARD_RIGHTS_ALL | SPECIFIC_RIGHTS_ALL,
                            AdminSID);

        /* Create security descriptor */
        psd = GlobalAlloc(GMEM_FIXED, sizeof(SECURITY_DESCRIPTOR));
        InitializeSecurityDescriptor(psd, SECURITY_DESCRIPTOR_REVISION);
        SetSecurityDescriptorDacl(psd, TRUE, AdminOnlyACL, FALSE);

        /* Create security attributes structure */
        psa = GlobalAlloc(GMEM_FIXED, sizeof(SECURITY_ATTRIBUTES));
        psa->nLength = sizeof(SECURITY_ATTRIBUTES);
        psa->lpSecurityDescriptor = psd;
        psa->bInheritHandle = TRUE;

        return psa;
}
#endif /* !DJGPP */

#ifndef DJGPP
/* Free a security attribute structure created by CreateCacheFileSA() */
VOID FreeCacheFileSA(PSECURITY_ATTRIBUTES psa)
{
        BOOL b1, b2;
        PACL pAcl;

        GetSecurityDescriptorDacl(psa->lpSecurityDescriptor, &b1, &pAcl, &b2);
        GlobalFree(pAcl);
        GlobalFree(psa->lpSecurityDescriptor);
        GlobalFree(psa);
}
#endif /* !DJGPP */
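
/* Illustrative pairing (sketch), as buf_Init does below: the attributes are
 * only needed for the create call, so they are freed right after it:
 *
 *      psa = CreateCacheFileSA();
 *      hf = CreateFile(cm_CachePath, ..., psa, ...);
 *      FreeCacheFileSA(psa);
 */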

/* initialize the buffer package; called with no locks
 * held during the initialization phase.
 */
long buf_Init(cm_buf_ops_t *opsp)
{
        static osi_once_t once;
        cm_buf_t *bp;
        long sectorSize;
        thread_t phandle;
#ifndef DJGPP
        HANDLE hf, hm;
        PSECURITY_ATTRIBUTES psa;
#endif /* !DJGPP */
        long i;
        unsigned long pid;
        char *data;
        long cs;

#ifndef DJGPP
        /* Get system info; all we really want is the allocation granularity */
        GetSystemInfo(&sysInfo);
#endif /* !DJGPP */

        /* Have to be able to reserve a whole chunk */
        if (((buf_nbuffers - 3) * buf_bufferSize) < cm_chunkSize)
                return CM_ERROR_TOOFEWBUFS;

        /* remember the callout table */
        cm_buf_opsp = opsp;

        if (osi_Once(&once)) {
                /* initialize global locks */
                lock_InitializeRWLock(&buf_globalLock, "Global buffer lock");

#ifndef DJGPP
                /*
                 * Cache file mapping constrained by
                 * system allocation granularity;
                 * round up, assuming granularity is a power of two
                 */
                cs = buf_nbuffers * buf_bufferSize;
                cs = (cs + (sysInfo.dwAllocationGranularity - 1))
                        & ~(sysInfo.dwAllocationGranularity - 1);
                if (cs != buf_nbuffers * buf_bufferSize) {
                        buf_nbuffers = cs / buf_bufferSize;
                        afsi_log("Cache size rounded up to %d buffers",
                                 buf_nbuffers);
                }
#endif /* !DJGPP */

                /* remember this for those who want to reset it */
                buf_nOrigBuffers = buf_nbuffers;

                /* lower hash size to a prime number */
                buf_hashSize = osi_PrimeLessThan(buf_hashSize);

                /* create hash table */
                buf_hashTablepp = malloc(buf_hashSize * sizeof(cm_buf_t *));
                memset((void *)buf_hashTablepp, 0,
                        buf_hashSize * sizeof(cm_buf_t *));

                /* another hash table */
                buf_fileHashTablepp = malloc(buf_hashSize * sizeof(cm_buf_t *));
                memset((void *)buf_fileHashTablepp, 0,
                        buf_hashSize * sizeof(cm_buf_t *));

                /* min value for which this works */
                sectorSize = 1;

#ifndef DJGPP
                /* Reserve buffer space by mapping cache file */
                psa = CreateCacheFileSA();
                hf = CreateFile(cm_CachePath,
                        GENERIC_READ | GENERIC_WRITE,
                        FILE_SHARE_READ | FILE_SHARE_WRITE,
                        psa,
                        OPEN_ALWAYS,
                        FILE_ATTRIBUTE_NORMAL,
                        NULL);
                if (hf == INVALID_HANDLE_VALUE) {
                        afsi_log("create file error %d", GetLastError());
                        FreeCacheFileSA(psa);
                        return CM_ERROR_INVAL;
                }
                FreeCacheFileSA(psa);
                CacheHandle = hf;
                hm = CreateFileMapping(hf,
                        NULL,
                        PAGE_READWRITE,
                        0, buf_nbuffers * buf_bufferSize,
                        NULL);
                if (hm == NULL) {
                        if (GetLastError() == ERROR_DISK_FULL) {
                                afsi_log("Error creating cache file mapping: disk full");
                                CloseHandle(hf);
                                return CM_ERROR_TOOMANYBUFS;
                        }
                        CloseHandle(hf);
                        return CM_ERROR_INVAL;
                }
                data = MapViewOfFile(hm,
                        FILE_MAP_ALL_ACCESS,
                        0, 0,
                        buf_nbuffers * buf_bufferSize);
                if (data == NULL) {
                        CloseHandle(hf);
                        CloseHandle(hm);
                        return CM_ERROR_INVAL;
                }
                CloseHandle(hm);
#else
                /* djgpp doesn't support memory mapped files */
                data = malloc(buf_nbuffers * buf_bufferSize);
#endif /* !DJGPP */

                /* create buffer headers and put in free list */
                bp = malloc(buf_nbuffers * sizeof(cm_buf_t));
                buf_allp = NULL;
                for(i=0; i<buf_nbuffers; i++) {
                        /* allocate and zero some storage */
                        memset(bp, 0, sizeof(cm_buf_t));

                        /* thread on list of all buffers */
                        bp->allp = buf_allp;
                        buf_allp = bp;

                        osi_QAdd((osi_queue_t **)&buf_freeListp, &bp->q);
                        bp->flags |= CM_BUF_INLRU;
                        lock_InitializeMutex(&bp->mx, "Buffer mutex");

                        /* grab appropriate number of bytes from aligned zone */
                        bp->datap = data;

                        /* setup last buffer pointer */
                        if (i == 0)
                                buf_freeListEndp = bp;

                        /* next */
                        bp++;
                        data += buf_bufferSize;
                }

                /* none reserved at first */
                buf_reservedBufs = 0;

                /* just for safety's sake */
                buf_maxReservedBufs = buf_nbuffers - 3;

                /* init the buffer trace log */
                buf_logp = osi_LogCreate("buffer", 10);

                osi_EndOnce(&once);

                /* and create the incr-syncer */
                phandle = thrd_Create(0, 0,
                                      (ThreadFunc) buf_IncrSyncer, 0, 0, &pid,
                                      "buf_IncrSyncer");

                osi_assertx(phandle != NULL, "buf: can't create incremental sync proc");
#ifndef DJGPP
                CloseHandle(phandle);
#endif /* !DJGPP */
        }

        return 0;
}

/* add nbuffers to the buffer pool, if possible.
 * Called with no locks held.
 */
long buf_AddBuffers(long nbuffers)
{
        cm_buf_t *bp;
        int i;
        char *data;
#ifndef DJGPP
        HANDLE hm;
        long cs;

        /*
         * Cache file mapping constrained by
         * system allocation granularity;
         * round up, assuming granularity is a power of two;
         * assume existing cache size is already rounded
         */
        cs = nbuffers * buf_bufferSize;
        cs = (cs + (sysInfo.dwAllocationGranularity - 1))
                & ~(sysInfo.dwAllocationGranularity - 1);
        if (cs != nbuffers * buf_bufferSize) {
                nbuffers = cs / buf_bufferSize;
        }

        /* Reserve additional buffer space by remapping cache file */
        hm = CreateFileMapping(CacheHandle,
                NULL,
                PAGE_READWRITE,
                0, (buf_nbuffers + nbuffers) * buf_bufferSize,
                NULL);
        if (hm == NULL) {
                if (GetLastError() == ERROR_DISK_FULL)
                        return CM_ERROR_TOOMANYBUFS;
                else
                        return CM_ERROR_INVAL;
        }
        data = MapViewOfFile(hm,
                FILE_MAP_ALL_ACCESS,
                0, buf_nbuffers * buf_bufferSize,
                nbuffers * buf_bufferSize);
        if (data == NULL) {
                CloseHandle(hm);
                return CM_ERROR_INVAL;
        }
        CloseHandle(hm);
#else
        /* djgpp doesn't support memory mapped files; allocate space for the
         * new buffers only */
        data = malloc(nbuffers * buf_bufferSize);
#endif /* !DJGPP */

        /* Create buffer headers and put in free list */
        bp = malloc(nbuffers * sizeof(*bp));

        for(i=0; i<nbuffers; i++) {
                memset(bp, 0, sizeof(*bp));

                lock_InitializeMutex(&bp->mx, "cm_buf_t");

                /* grab appropriate number of bytes from aligned zone */
                bp->datap = data;

                bp->flags |= CM_BUF_INLRU;

                lock_ObtainWrite(&buf_globalLock);
                /* note that buf_allp chain is covered by buf_globalLock now */
                bp->allp = buf_allp;
                buf_allp = bp;
                osi_QAdd((osi_queue_t **) &buf_freeListp, &bp->q);
                if (!buf_freeListEndp) buf_freeListEndp = bp;
                buf_nbuffers++;
                lock_ReleaseWrite(&buf_globalLock);

                bp++;
                data += buf_bufferSize;

        }        /* for loop over all buffers */

        return 0;
}

/* interface to set the number of buffers to an exact figure.
 * Called with no locks held.  Only growing the pool is supported;
 * a request to shrink it returns CM_ERROR_INVAL.
 */
long buf_SetNBuffers(long nbuffers)
{
        if (nbuffers < 10) return CM_ERROR_INVAL;
        if (nbuffers == buf_nbuffers) return 0;
        else if (nbuffers > buf_nbuffers)
                return buf_AddBuffers(nbuffers - buf_nbuffers);
        else return CM_ERROR_INVAL;
}

/* release a buffer.  Buffer must be referenced, but unlocked. */
void buf_Release(cm_buf_t *bp)
{
        lock_ObtainWrite(&buf_globalLock);
        buf_LockedRelease(bp);
        lock_ReleaseWrite(&buf_globalLock);
}

/* wait for reading or writing to clear; called with the buffer's mutex
 * held, and returns with it held.
 */
void buf_WaitIO(cm_buf_t *bp)
{
        while (1) {
                /* if no IO is happening, we're done */
                if (!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)))
                        break;

                /* otherwise I/O is happening; the thread performing it will
                 * figure out what happened and wake us, so sleep and then
                 * check again.
                 */
                bp->flags |= CM_BUF_WAITING;
                osi_SleepM((long) bp, &bp->mx);
                lock_ObtainMutex(&bp->mx);
                osi_Log1(buf_logp, "buf_WaitIO conflict wait done for 0x%x", bp);
        }

        /* if we get here, the IO is done, but we may have to wakeup people waiting for
         * the I/O to complete.  Do so.
         */
        if (bp->flags & CM_BUF_WAITING) {
                bp->flags &= ~CM_BUF_WAITING;
                osi_Wakeup((long) bp);
        }
        osi_Log1(buf_logp, "WaitIO finished wait for bp 0x%x", (long) bp);
}
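
/* Illustrative sketch (condensed from buf_Get below) of the protocol that
 * buf_WaitIO participates in: the thread performing I/O sets a busy flag
 * under the buffer's mutex, drops the mutex for the duration of the I/O,
 * then clears the flag and wakes any waiters:
 *
 *      bp->flags |= CM_BUF_READING;
 *      code = (*cm_buf_opsp->Readp)(bp, buf_bufferSize, &tcount, NULL);
 *      bp->flags &= ~CM_BUF_READING;
 *      if (bp->flags & CM_BUF_WAITING) {
 *              bp->flags &= ~CM_BUF_WAITING;
 *              osi_Wakeup((long) bp);
 *      }
 */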

/* code to drop reference count while holding buf_globalLock */
void buf_LockedRelease(cm_buf_t *bp)
{
        /* ensure that we're in the LRU queue if our ref count is 0 */
        osi_assert(bp->refCount > 0);
        if (--bp->refCount == 0) {
                if (!(bp->flags & CM_BUF_INLRU)) {
                        osi_QAdd((osi_queue_t **) &buf_freeListp, &bp->q);

                        /* watch for transition from empty to one element */
                        if (!buf_freeListEndp)
                                buf_freeListEndp = buf_freeListp;
                        bp->flags |= CM_BUF_INLRU;
                }
        }
}

/* find a buffer, if any, for a particular file ID and offset.  Assumes
 * that buf_globalLock is write locked when called.
 */
cm_buf_t *buf_LockedFind(struct cm_scache *scp, osi_hyper_t *offsetp)
{
        long i;
        cm_buf_t *bp;

        i = BUF_HASH(&scp->fid, offsetp);
        for(bp = buf_hashTablepp[i]; bp; bp=bp->hashp) {
                if (cm_FidCmp(&scp->fid, &bp->fid) == 0
                        && offsetp->LowPart == bp->offset.LowPart
                        && offsetp->HighPart == bp->offset.HighPart) {
                        bp->refCount++;
                        break;
                }
        }

        /* return whatever we found, if anything */
        return bp;
}

/* find a buffer with offset *offsetp for vnode *scp.  Called
 * with no locks held.  On success the buffer's reference count
 * has been bumped; the caller must buf_Release it.
 */
cm_buf_t *buf_Find(struct cm_scache *scp, osi_hyper_t *offsetp)
{
        cm_buf_t *bp;

        lock_ObtainWrite(&buf_globalLock);
        bp = buf_LockedFind(scp, offsetp);
        lock_ReleaseWrite(&buf_globalLock);

        return bp;
}

/* start cleaning I/O on this buffer.  Buffer must be locked (mutex held),
 * and is returned locked.
 *
 * Makes sure that there's only one person writing this block
 * at any given time, and also ensures that the log is forced sufficiently far,
 * if this buffer contains logged data.
 */
void buf_LockedCleanAsync(cm_buf_t *bp, cm_req_t *reqp)
{
        long code;

        code = 0;
        while ((bp->flags & (CM_BUF_WRITING | CM_BUF_DIRTY)) == CM_BUF_DIRTY) {
                lock_ReleaseMutex(&bp->mx);

                code = (*cm_buf_opsp->Writep)(&bp->fid, &bp->offset,
                                                buf_bufferSize, 0, bp->userp,
                                                reqp);

                lock_ObtainMutex(&bp->mx);
                if (code) break;

#ifdef DISKCACHE95
                /* Disk cache support */
                /* write buffer to disk cache (synchronous for now) */
                diskcache_Update(bp->dcp, bp->datap, buf_bufferSize, bp->dataVersion);
#endif /* DISKCACHE95 */
        }

        /* log only after any GetLastError processing, since logging can
         * clobber the last-error state */
        osi_Log2(buf_logp, "buf_CleanAsync starts I/O on 0x%x, done=%d", bp, code);

        /* if someone was waiting for the I/O that just completed or failed,
         * wake them up.
         */
        if (bp->flags & CM_BUF_WAITING) {
                /* turn off flags and wakeup users */
                bp->flags &= ~CM_BUF_WAITING;
                osi_Wakeup((long) bp);
        }
}

/* Called with a zero-ref count buffer and with the buf_globalLock write locked.
 * recycles the buffer, and leaves it ready for reuse with a ref count of 1.
 * The buffer must already be clean, and no I/O should be happening to it.
 */
void buf_Recycle(cm_buf_t *bp)
{
        int i;
        cm_buf_t **lbpp;
        cm_buf_t *tbp;
        cm_buf_t *prevBp, *nextBp;

        /* if we get here, we know that the buffer still has a 0 ref count,
         * and that it is clean and has no currently pending I/O.  This is
         * the dude to return.
         * Remember that as long as the ref count is 0, we know that we won't
         * have any lock conflicts, so we can grab the buffer lock out of
         * order in the locking hierarchy.
         */
        osi_Log2(buf_logp,
                "buf_Recycle recycles 0x%x, off 0x%x",
                bp, bp->offset.LowPart);

        osi_assert(bp->refCount == 0);
        osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY)));
        lock_AssertWrite(&buf_globalLock);

        if (bp->flags & CM_BUF_INHASH) {
                /* Remove from hash */

                i = BUF_HASH(&bp->fid, &bp->offset);
                lbpp = &(buf_hashTablepp[i]);
                for(tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = *lbpp) {
                        if (tbp == bp) break;
                }

                /* we better find it */
                osi_assertx(tbp != NULL, "buf_Recycle: hash table screwup");

                *lbpp = bp->hashp;      /* hash out */

                /* Remove from file hash */

                i = BUF_FILEHASH(&bp->fid);
                prevBp = bp->fileHashBackp;
                nextBp = bp->fileHashp;
                if (prevBp)
                        prevBp->fileHashp = nextBp;
                else
                        buf_fileHashTablepp[i] = nextBp;
                if (nextBp)
                        nextBp->fileHashBackp = prevBp;

                bp->flags &= ~CM_BUF_INHASH;
        }

        /* bump the soft reference counter now, to invalidate softRefs; no
         * wakeup is required since people don't sleep waiting for this
         * counter to change.
         */
        bp->idCounter++;

        /* make the fid unrecognizable */
        memset(&bp->fid, 0, sizeof(bp->fid));
}

/* recycle a buffer, removing it from the free list, hashing in its new identity
 * and returning it write-locked so that no one can use it.  Called without
 * any locks held, and can return an error if it loses the race condition and
 * finds that someone else created the desired buffer.
 *
 * If success is returned, the buffer is returned write-locked.
 *
 * May be called with null scp and offsetp, if we're just trying to reclaim some
 * space from the buffer pool.  In that case, the buffer will be returned
 * without being hashed into the hash table.
 */
long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
        cm_buf_t *bp;           /* buffer we're dealing with */
        cm_buf_t *nextBp;       /* next buffer in file hash chain */
        long i;                 /* temp */
        cm_req_t req;

        cm_InitReq(&req);       /* just in case */

        while(1) {
retry:
                lock_ObtainWrite(&buf_globalLock);
                /* check to see if we lost the race */
                if (scp) {
                        if ((bp = buf_LockedFind(scp, offsetp)) != NULL) {
                                bp->refCount--;
                                lock_ReleaseWrite(&buf_globalLock);
                                return CM_BUF_EXISTS;
                        }
                }

                /* for debugging, assert free list isn't empty, although we
                 * really should try waiting for a running transaction to finish
                 * instead of this; or better, we should have a transaction
                 * throttler prevent us from entering this situation.
                 */
                osi_assertx(buf_freeListEndp != NULL, "buf_GetNewLocked: no free buffers");

                /* look at all buffers in free list, some of which may temp.
                 * have high refcounts and which then should be skipped,
                 * starting cleaning I/O for those which are dirty.  If we find
                 * a clean buffer, we rehash it, lock it and return it.
                 */
                for(bp = buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
                        /* check to see if it really has zero ref count.  This
                         * code can bump refcounts, at least, so it may not be
                         * zero.
                         */
                        if (bp->refCount > 0) continue;

                        /* we don't have to lock buffer itself, since the ref
                         * count is 0 and we know it will stay zero as long as
                         * we hold the global lock.
                         */

                        /* don't recycle someone in our own chunk; note that
                         * scp may be null when reclaiming space */
                        if (scp && !cm_FidCmp(&bp->fid, &scp->fid)
                            && (bp->offset.LowPart & (-cm_chunkSize))
                                  == (offsetp->LowPart & (-cm_chunkSize)))
                                continue;

                        /* if this page is being filled (!) or cleaned, see if
                         * the I/O has completed.  If not, skip it, otherwise
                         * do the final processing for the I/O.
                         */
                        if (bp->flags & (CM_BUF_READING | CM_BUF_WRITING)) {
                                /* probably shouldn't do this much work while
                                 * holding the big lock?  Watch for contention
                                 * here.
                                 */
                                continue;
                        }

                        if (bp->flags & CM_BUF_DIRTY) {
                                /* if the buffer is dirty, start cleaning it and
                                 * move on to the next buffer.  We do this with
                                 * just the lock required to minimize contention
                                 * on the big lock.
                                 */
                                bp->refCount++;
                                lock_ReleaseWrite(&buf_globalLock);

                                /* grab required lock and clean; this only
                                 * starts the I/O.  By the time we're back,
                                 * it'll still be marked dirty, but it will also
                                 * have the WRITING flag set, so we won't get
                                 * back here.
                                 */
                                buf_CleanAsync(bp, &req);

                                /* now put it back and go around again */
                                buf_Release(bp);
                                goto retry;
                        }

                        /* if we get here, we know that the buffer still has a 0
                         * ref count, and that it is clean and has no currently
                         * pending I/O.  This is the dude to return.
                         * Remember that as long as the ref count is 0, we know
                         * that we won't have any lock conflicts, so we can grab
                         * the buffer lock out of order in the locking hierarchy.
                         */
                        buf_Recycle(bp);

                        /* clean up junk flags */
                        bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR);
                        bp->dataVersion = -1;   /* unknown so far */

                        /* now hash in as our new buffer, and give it the
                         * appropriate label, if requested.
                         */
                        if (scp) {
                                bp->flags |= CM_BUF_INHASH;
                                bp->fid = scp->fid;
                                bp->offset = *offsetp;
                                i = BUF_HASH(&scp->fid, offsetp);
                                bp->hashp = buf_hashTablepp[i];
                                buf_hashTablepp[i] = bp;
                                i = BUF_FILEHASH(&scp->fid);
                                nextBp = buf_fileHashTablepp[i];
                                bp->fileHashp = nextBp;
                                bp->fileHashBackp = NULL;
                                if (nextBp)
                                        nextBp->fileHashBackp = bp;
                                buf_fileHashTablepp[i] = bp;
                        }

                        /* prepare to return it.  Start by giving it a good
                         * refcount */
                        bp->refCount = 1;

                        /* and since it has a non-zero ref count, we should move
                         * it from the lru queue.  It better be still there,
                         * since we've held the global (big) lock since we found
                         * it there.
                         */
                        osi_assertx(bp->flags & CM_BUF_INLRU,
                                    "buf_GetNewLocked: LRU screwup");
                        if (buf_freeListEndp == bp) {
                                /* we're the last guy in this queue, so maintain it */
                                buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
                        }
                        osi_QRemove((osi_queue_t **) &buf_freeListp, &bp->q);
                        bp->flags &= ~CM_BUF_INLRU;

                        /* finally, grab the mutex so that people don't use it
                         * before the caller fills it with data.  Again, no one
                         * should have been able to get to this dude to lock it.
                         */
                        osi_assertx(lock_TryMutex(&bp->mx),
                                    "buf_GetNewLocked: TryMutex failed");

                        lock_ReleaseWrite(&buf_globalLock);
                        *bufpp = bp;
                        return 0;
                } /* for all buffers in lru queue */
                lock_ReleaseWrite(&buf_globalLock);
        }       /* while loop over everything */
        /* not reached */
} /* the proc */

/* get a page, returning it held but unlocked.  Doesn't fill in the page
 * with I/O, since we're going to write the whole thing new.
 */
long buf_GetNew(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
        cm_buf_t *bp;
        long code;
        osi_hyper_t pageOffset;
        int created;

        created = 0;
        pageOffset.HighPart = offsetp->HighPart;
        pageOffset.LowPart = offsetp->LowPart & ~(buf_bufferSize-1);
        while (1) {
                lock_ObtainWrite(&buf_globalLock);
                bp = buf_LockedFind(scp, &pageOffset);
                lock_ReleaseWrite(&buf_globalLock);
                if (bp) {
                        /* lock it and break out */
                        lock_ObtainMutex(&bp->mx);
                        break;
                }

                /* otherwise, we have to create a page */
                code = buf_GetNewLocked(scp, &pageOffset, &bp);

                /* check if the buffer was created in a race condition branch.
                 * If so, go around so we can hold a reference to it.
                 */
                if (code == CM_BUF_EXISTS) continue;

                /* something else went wrong */
                if (code != 0) return code;

                /* otherwise, we have a locked buffer that we just created */
                created = 1;
                break;
        } /* big while loop */

        /* wait for reads */
        if (bp->flags & CM_BUF_READING)
                buf_WaitIO(bp);

        /* once it has been read once, we can unlock it and return it, still
         * with its refcount held.
         */
        lock_ReleaseMutex(&bp->mx);
        *bufpp = bp;
        osi_Log3(buf_logp, "buf_GetNew returning bp 0x%x for file 0x%x, offset 0x%x",
                bp, (long) scp, offsetp->LowPart);
        return 0;
}

/* get a page, returning it held but unlocked.  Make sure it is complete */
long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
        cm_buf_t *bp;
        long code;
        osi_hyper_t pageOffset;
        unsigned long tcount;
        int created;
#ifdef DISKCACHE95
        cm_diskcache_t *dcp;
#endif /* DISKCACHE95 */

        created = 0;
        pageOffset.HighPart = offsetp->HighPart;
        pageOffset.LowPart = offsetp->LowPart & ~(buf_bufferSize-1);
        while (1) {
                lock_ObtainWrite(&buf_globalLock);
                bp = buf_LockedFind(scp, &pageOffset);
                lock_ReleaseWrite(&buf_globalLock);
                if (bp) {
                        /* lock it and break out */
                        lock_ObtainMutex(&bp->mx);
#ifdef DISKCACHE95
                        /* touch disk chunk to update LRU info */
                        diskcache_Touch(bp->dcp);
#endif /* DISKCACHE95 */
                        break;
                }

                /* otherwise, we have to create a page */
                code = buf_GetNewLocked(scp, &pageOffset, &bp);

                /* check if the buffer was created in a race condition branch.
                 * If so, go around so we can hold a reference to it.
                 */
                if (code == CM_BUF_EXISTS) continue;

                /* something else went wrong */
                if (code != 0) return code;

                /* otherwise, we have a locked buffer that we just created */
                created = 1;
                break;
        } /* big while loop */

        /* if we get here, we have a locked buffer that may have just been
         * created, in which case it needs to be filled with data.
         */
        if (created) {
                /* load the page; freshly created pages should be idle */
                osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)));

                /* setup offset, event */
#ifndef DJGPP  /* doesn't seem to be used */
                bp->over.Offset = bp->offset.LowPart;
                bp->over.OffsetHigh = bp->offset.HighPart;
#endif /* !DJGPP */

                /* start the I/O; may drop lock */
                bp->flags |= CM_BUF_READING;
                code = (*cm_buf_opsp->Readp)(bp, buf_bufferSize, &tcount, NULL);

#ifdef DISKCACHE95
                code = diskcache_Get(&bp->fid, &bp->offset, bp->datap, buf_bufferSize, &bp->dataVersion, &tcount, &dcp);
                bp->dcp = dcp;    /* pointer to disk cache struct. */
#endif /* DISKCACHE95 */

                if (code != 0) {
                        /* failure or queued */
#ifndef DJGPP   /* cm_bufRead always returns 0 */
                        if (code != ERROR_IO_PENDING) {
#endif
                                bp->error = code;
                                bp->flags |= CM_BUF_ERROR;
                                bp->flags &= ~CM_BUF_READING;
                                if (bp->flags & CM_BUF_WAITING) {
                                        bp->flags &= ~CM_BUF_WAITING;
                                        osi_Wakeup((long) bp);
                                }
                                lock_ReleaseMutex(&bp->mx);
                                buf_Release(bp);
                                return code;
#ifndef DJGPP
                        }
#endif
                } else {
                        /* otherwise, I/O completed instantly and we're done, except
                         * for padding the xfr out with 0s and checking for EOF
                         */
                        if (tcount < (unsigned long) buf_bufferSize) {
                                memset(bp->datap+tcount, 0, buf_bufferSize - tcount);
                                if (tcount == 0)
                                        bp->flags |= CM_BUF_EOF;
                        }
                        bp->flags &= ~CM_BUF_READING;
                        if (bp->flags & CM_BUF_WAITING) {
                                bp->flags &= ~CM_BUF_WAITING;
                                osi_Wakeup((long) bp);
                        }
                }

        } /* if created */

        /* wait for reads, either that which we started above, or that someone
         * else started.  We don't care if we return a buffer being cleaned.
         */
        if (bp->flags & CM_BUF_READING)
                buf_WaitIO(bp);

        /* once it has been read once, we can unlock it and return it, still
         * with its refcount held.
         */
        lock_ReleaseMutex(&bp->mx);
        *bufpp = bp;

        /* now remove from queue; will be put in at the head (farthest from
         * being recycled) when we're done in buf_Release.
         */
        lock_ObtainWrite(&buf_globalLock);
        if (bp->flags & CM_BUF_INLRU) {
                if (buf_freeListEndp == bp)
                        buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
                osi_QRemove((osi_queue_t **) &buf_freeListp, &bp->q);
                bp->flags &= ~CM_BUF_INLRU;
        }
        lock_ReleaseWrite(&buf_globalLock);

        osi_Log3(buf_logp, "buf_Get returning bp 0x%x for file 0x%x, offset 0x%x",
                bp, (long) scp, offsetp->LowPart);
        return 0;
}
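
/* Typical caller pattern (illustrative sketch; error handling trimmed, and
 * scp is assumed to be a held cm_scache_t):
 *
 *      cm_buf_t *bufp;
 *      long code;
 *
 *      code = buf_Get(scp, &offset, &bufp);    (returned held, unlocked)
 *      if (code == 0) {
 *              lock_ObtainMutex(&bufp->mx);
 *              ... modify bufp->datap ...
 *              buf_SetDirty(bufp);             (requires the buffer locked)
 *              lock_ReleaseMutex(&bufp->mx);
 *              buf_Release(bufp);              (drop our reference)
 *      }
 */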

/* count # of elements in the free list;
 * we don't bother doing the proper locking for accessing dataVersion or flags
 * since it is a pain, and this is really just an advisory call.  If you need
 * to do better at some point, rewrite this function.
 */
long buf_CountFreeList(void)
{
        long count;
        cm_buf_t *bufp;

        count = 0;
        lock_ObtainRead(&buf_globalLock);
        for(bufp = buf_freeListp; bufp; bufp = (cm_buf_t *) osi_QNext(&bufp->q)) {
                /* if the buffer doesn't have an identity, or if the buffer
                 * has been invalidated (by having its DV stomped upon), then
                 * count it as free, since it isn't really being utilized.
                 */
                if (!(bufp->flags & CM_BUF_INHASH) || bufp->dataVersion <= 0)
                        count++;
        }
        lock_ReleaseRead(&buf_globalLock);
        return count;
}

/* start cleaning a buffer; returns without waiting for the I/O to
 * finish (see buf_CleanWait) */
void buf_CleanAsync(cm_buf_t *bp, cm_req_t *reqp)
{
        lock_ObtainMutex(&bp->mx);
        buf_LockedCleanAsync(bp, reqp);
        lock_ReleaseMutex(&bp->mx);
}

/* wait for a buffer's cleaning to finish */
void buf_CleanWait(cm_buf_t *bp)
{
        lock_ObtainMutex(&bp->mx);
        if (bp->flags & CM_BUF_WRITING) {
                buf_WaitIO(bp);
        }
        lock_ReleaseMutex(&bp->mx);
}

/* set the dirty flag on a buffer.
 *
 * The buffer must be locked before calling this routine.
 */
void buf_SetDirty(cm_buf_t *bp)
{
        osi_assert(bp->refCount > 0);

        osi_Log1(buf_logp, "buf_SetDirty 0x%x", bp);

        /* set dirty bit */
        bp->flags |= CM_BUF_DIRTY;

        /* and turn off EOF flag, since it has associated data now */
        bp->flags &= ~CM_BUF_EOF;
}

/* clean all buffers, reset log pointers and invalidate all buffers.
 * Called with no locks held, and returns with same.
 *
 * This function is guaranteed to clean and remove the log ptr of all the
 * buffers that were dirty or had non-zero log ptrs before the call was
 * made.  That's sufficient to clean up any garbage left around by recovery,
 * which is all we're counting on this for; there may be newly created buffers
 * added while we're running, but that should be OK.
 *
 * In an environment where there are no transactions (artificially imposed, for
 * example, when switching the database to raw mode), this function is used to
 * make sure that all updates have been written to the disk.  In that case, we don't
 * really require that we forget the log association between pages and logs, but
 * it also doesn't hurt.  Since raw mode I/O goes through this buffer package, we don't
 * have to worry about invalidating data in the buffers.
 *
 * This function is used at the end of recovery as paranoia to get the recovered
 * database out to disk.  It removes all references to the recovery log and cleans
 * all buffers.
 */
long buf_CleanAndReset(void)
{
        long i;
        cm_buf_t *bp;
        cm_req_t req;

        lock_ObtainWrite(&buf_globalLock);
        for(i=0; i<buf_hashSize; i++) {
                for(bp = buf_hashTablepp[i]; bp; bp = bp->hashp) {
                        bp->refCount++;
                        lock_ReleaseWrite(&buf_globalLock);

                        /* now no locks are held; clean buffer and go on */
                        cm_InitReq(&req);
                        buf_CleanAsync(bp, &req);
                        buf_CleanWait(bp);

                        /* relock and release buffer */
                        lock_ObtainWrite(&buf_globalLock);
                        buf_LockedRelease(bp);
                } /* over one bucket */
        }       /* for loop over all hash buckets */

        /* release locks */
        lock_ReleaseWrite(&buf_globalLock);

        /* and we're done */
        return 0;
}

/* called without global lock being held, reserves buffers for callers
 * that need more than one held (not locked) at once.
 */
void buf_ReserveBuffers(long nbuffers)
{
        lock_ObtainWrite(&buf_globalLock);
        while (1) {
                if (buf_reservedBufs + nbuffers > buf_maxReservedBufs) {
                        buf_reserveWaiting = 1;
                        osi_Log1(buf_logp, "buf_ReserveBuffers waiting for %d bufs", nbuffers);
                        osi_SleepW((long) &buf_reservedBufs, &buf_globalLock);
                        lock_ObtainWrite(&buf_globalLock);
                }
                else {
                        buf_reservedBufs += nbuffers;
                        break;
                }
        }
        lock_ReleaseWrite(&buf_globalLock);
}

int buf_TryReserveBuffers(long nbuffers)
{
        int code;

        lock_ObtainWrite(&buf_globalLock);
        if (buf_reservedBufs + nbuffers > buf_maxReservedBufs) {
                code = 0;
        }
        else {
                buf_reservedBufs += nbuffers;
                code = 1;
        }
        lock_ReleaseWrite(&buf_globalLock);
        return code;
}

/* called without global lock held, releases reservation held by
 * buf_ReserveBuffers.
 */
void buf_UnreserveBuffers(long nbuffers)
{
        lock_ObtainWrite(&buf_globalLock);
        buf_reservedBufs -= nbuffers;
        if (buf_reserveWaiting) {
                buf_reserveWaiting = 0;
                osi_Wakeup((long) &buf_reservedBufs);
        }
        lock_ReleaseWrite(&buf_globalLock);
}
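
/* Illustrative use of the reservation interface (sketch): a caller that
 * needs a chunk's worth of buffers held at once reserves slots first, so
 * the pool can't be oversubscribed, and releases them when done:
 *
 *      buf_ReserveBuffers(cm_chunkSize / buf_bufferSize);
 *      ... buf_Get/buf_GetNew the buffers, use them, buf_Release them ...
 *      buf_UnreserveBuffers(cm_chunkSize / buf_bufferSize);
 */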

/* truncate the buffers past sizep, zeroing out the page, if we don't
 * end on a page boundary.
 *
 * Requires the scp's bufCreateLock to be write locked.
 */
long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
        osi_hyper_t *sizep)
{
        cm_buf_t *bufp;
        cm_buf_t *nbufp;                        /* next buffer, if didRelease */
        osi_hyper_t bufEnd;
        long code;
        long bufferPos;
        int didRelease;
        long i;

        /* assert that the bufCreateLock is held in write mode */
        lock_AssertWrite(&scp->bufCreateLock);

        i = BUF_FILEHASH(&scp->fid);

        lock_ObtainWrite(&buf_globalLock);
        bufp = buf_fileHashTablepp[i];
        if (bufp == NULL) {
                lock_ReleaseWrite(&buf_globalLock);
                return 0;
        }

        bufp->refCount++;
        lock_ReleaseWrite(&buf_globalLock);
        for(; bufp; bufp = nbufp) {
                didRelease = 0;
                lock_ObtainMutex(&bufp->mx);

                bufEnd.HighPart = 0;
                bufEnd.LowPart = buf_bufferSize;
                bufEnd = LargeIntegerAdd(bufEnd, bufp->offset);

                if (cm_FidCmp(&bufp->fid, &scp->fid) == 0 &&
                        LargeIntegerLessThan(*sizep, bufEnd)) {
                        buf_WaitIO(bufp);
                }
                lock_ObtainMutex(&scp->mx);

                /* make sure we have a callback (so we have the right value for
                 * the length), and wait for it to be safe to do a truncate.
                 */
                code = cm_SyncOp(scp, bufp, userp, reqp, 0,
                                 CM_SCACHESYNC_NEEDCALLBACK
                                 | CM_SCACHESYNC_GETSTATUS
                                 | CM_SCACHESYNC_SETSIZE
                                 | CM_SCACHESYNC_BUFLOCKED);
                /* if we succeeded in our locking, and this applies to the right
                 * file, and the truncate request overlaps the buffer either
                 * totally or partially, then do something.
                 */
                if (code == 0 && cm_FidCmp(&bufp->fid, &scp->fid) == 0
                        && LargeIntegerLessThan(*sizep, bufEnd)) {

                        lock_ObtainWrite(&buf_globalLock);

                        /* destroy the buffer, turning off its dirty bit, if
                         * we're truncating the whole buffer.  Otherwise, set
                         * the dirty bit, and clear out the tail of the buffer
                         * if we just overlap some.
                         */
                        if (LargeIntegerLessThanOrEqualTo(*sizep, bufp->offset)) {
                                /* truncating the entire page */
                                bufp->flags &= ~CM_BUF_DIRTY;
                                bufp->dataVersion = -1; /* known bad */
                                bufp->dirtyCounter++;
                        }
                        else {
                                /* don't set dirty, since dirty implies
                                 * currently up-to-date.  Don't need to do this,
                                 * since we'll update the length anyway.
                                 *
                                 * Zero out remainder of the page, in case we
                                 * seek and write past EOF, and make this data
                                 * visible again.
                                 */
                                bufferPos = sizep->LowPart & (buf_bufferSize - 1);
                                osi_assert(bufferPos != 0);
                                memset(bufp->datap + bufferPos, 0,
                                        buf_bufferSize - bufferPos);
                        }

                        lock_ReleaseWrite(&buf_globalLock);

                }

                lock_ReleaseMutex(&scp->mx);
                lock_ReleaseMutex(&bufp->mx);
                if (!didRelease) {
                        lock_ObtainWrite(&buf_globalLock);
                        nbufp = bufp->fileHashp;
                        if (nbufp) nbufp->refCount++;
                        buf_LockedRelease(bufp);
                        lock_ReleaseWrite(&buf_globalLock);
                }

                /* bail out early if we fail */
                if (code) {
                        /* at this point, nbufp is held; bufp has already been
                         * released.
                         */
                        if (nbufp) buf_Release(nbufp);
                        return code;
                }
        }

        /* success */
        return 0;
}

long buf_FlushCleanPages(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
{
        long code;
        cm_buf_t *bp;           /* buffer we're hacking on */
        cm_buf_t *nbp;
        int didRelease;
        long i;

        i = BUF_FILEHASH(&scp->fid);

        code = 0;
        lock_ObtainWrite(&buf_globalLock);
        bp = buf_fileHashTablepp[i];
        if (bp) bp->refCount++;
        lock_ReleaseWrite(&buf_globalLock);
        for(; bp; bp = nbp) {
                didRelease = 0; /* haven't released this buffer yet */

                /* clean buffer synchronously */
                if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
                        lock_ObtainMutex(&bp->mx);

                        /* start cleaning the buffer, and wait for it to finish */
                        buf_LockedCleanAsync(bp, reqp);
                        buf_WaitIO(bp);
                        lock_ReleaseMutex(&bp->mx);

                        code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
                        if (code) goto skip;

                        lock_ObtainWrite(&buf_globalLock);
                        /* actually, we only know that buffer is clean if ref
                         * count is 1, since we don't have buffer itself locked.
                         */
                        if (!(bp->flags & CM_BUF_DIRTY)) {
                                if (bp->refCount == 1) {        /* bp is held above */
                                        buf_LockedRelease(bp);
                                        nbp = bp->fileHashp;
                                        if (nbp) nbp->refCount++;
                                        didRelease = 1;
                                        buf_Recycle(bp);
                                }
                        }
                        lock_ReleaseWrite(&buf_globalLock);

                        (*cm_buf_opsp->Unstabilizep)(scp, userp);
                }

skip:
                if (!didRelease) {
                        lock_ObtainWrite(&buf_globalLock);
                        if ((nbp = bp->fileHashp) != NULL) nbp->refCount++;
                        buf_LockedRelease(bp);
                        lock_ReleaseWrite(&buf_globalLock);
                }
        }       /* for loop over a bunch of buffers */

        /* done */
        return code;
}

long buf_CleanVnode(struct cm_scache *scp, cm_user_t *userp, cm_req_t *reqp)
{
        long code;
        cm_buf_t *bp;           /* buffer we're hacking on */
        cm_buf_t *nbp;          /* next one */
        long i;

        i = BUF_FILEHASH(&scp->fid);

        code = 0;
        lock_ObtainWrite(&buf_globalLock);
        bp = buf_fileHashTablepp[i];
        if (bp) bp->refCount++;
        lock_ReleaseWrite(&buf_globalLock);
        for(; bp; bp = nbp) {
                /* clean buffer synchronously */
                if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
                        if (userp) {
                                lock_ObtainMutex(&bp->mx);
                                if (bp->userp) cm_ReleaseUser(bp->userp);
                                bp->userp = userp;
                                lock_ReleaseMutex(&bp->mx);
                                cm_HoldUser(userp);
                        }
                        buf_CleanAsync(bp, reqp);
                        buf_CleanWait(bp);
                        lock_ObtainMutex(&bp->mx);
                        if (bp->flags & CM_BUF_ERROR) {
                                if (code == 0 || code == -1) code = bp->error;
                                if (code == 0) code = -1;
                        }
                        lock_ReleaseMutex(&bp->mx);
                }

                lock_ObtainWrite(&buf_globalLock);
                buf_LockedRelease(bp);
                nbp = bp->fileHashp;
                if (nbp) nbp->refCount++;
                lock_ReleaseWrite(&buf_globalLock);
        }       /* for loop over a bunch of buffers */

        /* done */
        return code;
}