2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include <afs/param.h>
15 #include <sys/types.h>
20 #include <netinet/in.h>
27 #include "vlserver_internal.h"
/* Module-level cache state for the vldb.
 * NOTE(review): this listing is a line-sampled excerpt (embedded original
 * line numbers, numbering gaps), so some declarations are not visible. */
29 struct vlheader xheader;
30 extern int maxnservers;
31 struct extentaddr extentaddr;
/* Host-address caches: rd_* is the committed (read) copy, wr_* is the
 * write-transaction copy; see vlsetcache() below for how they pair up. */
32 extern afs_uint32 rd_HostAddress[MAXSERVERID + 1];
33 extern afs_uint32 wr_HostAddress[MAXSERVERID + 1];
/* Cached multihomed extent blocks, one slot per possible extent block. */
34 struct extentaddr *rd_ex_addr[VL_MAX_ADDREXTBLKS] = { 0, 0, 0, 0 };
35 struct extentaddr *wr_ex_addr[VL_MAX_ADDREXTBLKS] = { 0, 0, 0, 0 };
36 struct vlheader rd_cheader; /* kept in network byte order */
37 struct vlheader wr_cheader;
/* Forward declaration: sanity-checks that blockindex addresses a vldb entry. */
40 static int index_OK(struct vl_ctx *ctx, afs_int32 blockindex);
/* Record CODE in the local `error` variable and jump to the function's
 * error_exit label.  Wrapped in do { } while (0) so the macro expands to a
 * single statement and is safe in an unbraced if/else (the original brace
 * block broke `if (x) ERROR_EXIT(e); else ...`). */
#define ERROR_EXIT(code) do { error = (code); goto error_exit; } while (0)
44 /* Hashing algorithm based on the volume id; HASHSIZE must be prime */
/* NOTE(review): abs() on the most negative 32-bit value is undefined
 * behavior; assumes volume ids never take that value — TODO confirm. */
46 IDHash(afs_int32 volumeid)
48 return ((abs(volumeid)) % HASHSIZE);
52 /* Hashing algorithm based on the volume name; name's size is implicit (64 chars) and if changed it should be reflected here. */
54 NameHash(char *volumename)
/* Walk the name back-to-front, folding each byte into the hash; the
 * unsigned char cast avoids sign-extension on platforms with signed char. */
60 for (i = strlen(volumename), volumename += i - 1; i--; volumename--)
61 hash = (hash * 63) + (*((unsigned char *)volumename) - 63);
62 return (hash % HASHSIZE);
66 /* package up seek and write into one procedure for ease of use */
/* Seek to `offset` in ubik file 0, then write `length` bytes from `buffer`;
 * returns the ubik error code (0 on success). */
68 vlwrite(struct ubik_trans *trans, afs_int32 offset, void *buffer,
73 if ((errorcode = ubik_Seek(trans, 0, offset)))
75 return (ubik_Write(trans, buffer, length));
79 /* Package up seek and read into one procedure for ease of use */
/* Seek to `offset` in ubik file 0, then read `length` bytes into `buffer`;
 * returns the ubik error code (0 on success). */
81 vlread(struct ubik_trans *trans, afs_int32 offset, char *buffer,
86 if ((errorcode = ubik_Seek(trans, 0, offset)))
88 return (ubik_Read(trans, buffer, length));
92 /* take entry and convert to network order and write to disk */
/* `buffer` always holds a host-order struct nvlentry; depending on the
 * database format (maxnservers) it is marshalled into either a new-format
 * nvlentry or an old-format vlentry before being written at `offset`. */
94 vlentrywrite(struct ubik_trans *trans, afs_int32 offset, void *buffer,
97 struct vlentry oentry;
98 struct nvlentry nentry, *nep;
102 if (length != sizeof(oentry))
/* New-format path: 13 servers per entry (nvlentry layout on disk). */
104 if (maxnservers == 13) {
105 nep = (struct nvlentry *)buffer;
106 for (i = 0; i < MAXTYPES; i++)
107 nentry.volumeId[i] = htonl(nep->volumeId[i]);
108 nentry.flags = htonl(nep->flags);
109 nentry.LockAfsId = htonl(nep->LockAfsId);
110 nentry.LockTimestamp = htonl(nep->LockTimestamp);
111 nentry.cloneId = htonl(nep->cloneId);
112 for (i = 0; i < MAXTYPES; i++)
113 nentry.nextIdHash[i] = htonl(nep->nextIdHash[i]);
114 nentry.nextNameHash = htonl(nep->nextNameHash);
115 memcpy(nentry.name, nep->name, VL_MAXNAMELEN);
116 memcpy(nentry.serverNumber, nep->serverNumber, NMAXNSERVERS);
117 memcpy(nentry.serverPartition, nep->serverPartition, NMAXNSERVERS);
118 memcpy(nentry.serverFlags, nep->serverFlags, NMAXNSERVERS);
/* NOTE(review): nentry is not zeroed first (unlike oentry below), so any
 * struct padding written to disk is indeterminate — TODO confirm harmless. */
119 bufp = (char *)&nentry;
/* Old-format path: marshal into a vlentry; zeroed first because the old
 * server arrays (OMAXNSERVERS) are shorter, leaving tail bytes untouched. */
121 memset(&oentry, 0, sizeof(struct vlentry));
122 nep = (struct nvlentry *)buffer;
123 for (i = 0; i < MAXTYPES; i++)
124 oentry.volumeId[i] = htonl(nep->volumeId[i]);
125 oentry.flags = htonl(nep->flags);
126 oentry.LockAfsId = htonl(nep->LockAfsId);
127 oentry.LockTimestamp = htonl(nep->LockTimestamp);
128 oentry.cloneId = htonl(nep->cloneId);
129 for (i = 0; i < MAXTYPES; i++)
130 oentry.nextIdHash[i] = htonl(nep->nextIdHash[i]);
131 oentry.nextNameHash = htonl(nep->nextNameHash);
132 memcpy(oentry.name, nep->name, VL_MAXNAMELEN);
133 memcpy(oentry.serverNumber, nep->serverNumber, OMAXNSERVERS);
134 memcpy(oentry.serverPartition, nep->serverPartition, OMAXNSERVERS);
135 memcpy(oentry.serverFlags, nep->serverFlags, OMAXNSERVERS);
136 bufp = (char *)&oentry;
138 return vlwrite(trans, offset, bufp, length);
141 /* read entry from disk and convert to host order */
/* Inverse of vlentrywrite(): reads `length` bytes at `offset`, then
 * unmarshals the on-disk entry (new nvlentry or old vlentry format,
 * chosen by maxnservers) into the host-order nvlentry at `buffer`. */
143 vlentryread(struct ubik_trans *trans, afs_int32 offset, char *buffer,
146 struct vlentry *oep, tentry;
147 struct nvlentry *nep, *nbufp;
148 char *bufp = (char *)&tentry;
151 if (length != sizeof(vlentry))
153 i = vlread(trans, offset, bufp, length);
/* New-format path: disk record is already an nvlentry. */
156 if (maxnservers == 13) {
157 nep = (struct nvlentry *)bufp;
158 nbufp = (struct nvlentry *)buffer;
159 for (i = 0; i < MAXTYPES; i++)
160 nbufp->volumeId[i] = ntohl(nep->volumeId[i]);
161 nbufp->flags = ntohl(nep->flags);
162 nbufp->LockAfsId = ntohl(nep->LockAfsId);
163 nbufp->LockTimestamp = ntohl(nep->LockTimestamp);
164 nbufp->cloneId = ntohl(nep->cloneId);
165 for (i = 0; i < MAXTYPES; i++)
166 nbufp->nextIdHash[i] = ntohl(nep->nextIdHash[i]);
167 nbufp->nextNameHash = ntohl(nep->nextNameHash);
168 memcpy(nbufp->name, nep->name, VL_MAXNAMELEN);
169 memcpy(nbufp->serverNumber, nep->serverNumber, NMAXNSERVERS);
170 memcpy(nbufp->serverPartition, nep->serverPartition, NMAXNSERVERS);
171 memcpy(nbufp->serverFlags, nep->serverFlags, NMAXNSERVERS);
/* Old-format path: widen the on-disk vlentry into an nvlentry; the
 * destination is zeroed so fields beyond OMAXNSERVERS stay clear. */
173 oep = (struct vlentry *)bufp;
174 nbufp = (struct nvlentry *)buffer;
175 memset(nbufp, 0, sizeof(struct nvlentry));
176 for (i = 0; i < MAXTYPES; i++)
177 nbufp->volumeId[i] = ntohl(oep->volumeId[i]);
178 nbufp->flags = ntohl(oep->flags);
179 nbufp->LockAfsId = ntohl(oep->LockAfsId);
180 nbufp->LockTimestamp = ntohl(oep->LockTimestamp);
181 nbufp->cloneId = ntohl(oep->cloneId);
182 for (i = 0; i < MAXTYPES; i++)
183 nbufp->nextIdHash[i] = ntohl(oep->nextIdHash[i]);
184 nbufp->nextNameHash = ntohl(oep->nextNameHash);
185 memcpy(nbufp->name, oep->name, VL_MAXNAMELEN);
/* NOTE(review): copies NMAXNSERVERS bytes from the old-format (shorter,
 * OMAXNSERVERS) source arrays — presumably reads adjacent fields of the
 * old struct; verify against the on-disk vlentry layout. */
186 memcpy(nbufp->serverNumber, oep->serverNumber, NMAXNSERVERS);
187 memcpy(nbufp->serverPartition, oep->serverPartition, NMAXNSERVERS);
188 memcpy(nbufp->serverFlags, oep->serverFlags, NMAXNSERVERS);
193 /* Convenient write of small critical vldb header info to the database. */
/* Writes only the vital_vlheader portion (at offset 0) of the cached
 * cheader back to disk. */
195 write_vital_vlheader(struct vl_ctx *ctx)
198 (ctx->trans, 0, (char *)&ctx->cheader->vital_header, sizeof(vital_vlheader))
206 /* This routine reads in the extent blocks for multi-homed servers.
207 * There used to be an initialization bug that would cause the contaddrs
208 * pointers in the first extent block to be bad. Here we will check the
209 * pointers and zero them in the in-memory copy if we find them bad. We
210 * also try to write the extent blocks back out. If we can't, then we
211 * will wait until the next write transaction to write them out
212 * (extent_mod tells us the on-disk copy is bad).
215 readExtents(struct ubik_trans *trans)
217 afs_uint32 extentAddr;
218 afs_int32 error = 0, code;
/* rd_cheader.SIT is kept in network order; convert to get the file
 * offset of the base extent block. */
222 extentAddr = ntohl(rd_cheader.SIT);
226 /* Read the first extension block */
227 if (!rd_ex_addr[0]) {
228 rd_ex_addr[0] = (struct extentaddr *)malloc(VL_ADDREXTBLK_SIZE);
230 ERROR_EXIT(VL_NOMEM);
232 code = vlread(trans, extentAddr, (char *)rd_ex_addr[0], VL_ADDREXTBLK_SIZE);
234 free(rd_ex_addr[0]); /* Not the place to create it */
239 /* In case more that 64 mh servers are in use they're kept in these
240 * continuation blocks
242 for (i = 1; i < VL_MAX_ADDREXTBLKS; i++) {
243 if (!rd_ex_addr[0]->ex_contaddrs[i])
246 /* Before reading it in, check to see if the address is good */
/* A valid continuation address must lie after the previous block and
 * before the database EOF; otherwise zero it in the in-memory copy. */
247 if ((ntohl(rd_ex_addr[0]->ex_contaddrs[i]) <
248 ntohl(rd_ex_addr[0]->ex_contaddrs[i - 1]) + VL_ADDREXTBLK_SIZE)
249 || (ntohl(rd_ex_addr[0]->ex_contaddrs[i]) >
250 ntohl(rd_cheader.vital_header.eofPtr) - VL_ADDREXTBLK_SIZE)) {
252 rd_ex_addr[0]->ex_contaddrs[i] = 0;
257 /* Read the continuation block */
258 if (!rd_ex_addr[i]) {
259 rd_ex_addr[i] = (struct extentaddr *)malloc(VL_ADDREXTBLK_SIZE);
261 ERROR_EXIT(VL_NOMEM);
264 vlread(trans, ntohl(rd_ex_addr[0]->ex_contaddrs[i]),
265 (char *)rd_ex_addr[i], VL_ADDREXTBLK_SIZE);
267 free(rd_ex_addr[i]); /* Not the place to create it */
272 /* After reading it in, check to see if its a real continuation block */
273 if (ntohl(rd_ex_addr[i]->ex_flags) != VLCONTBLOCK) {
275 rd_ex_addr[0]->ex_contaddrs[i] = 0;
276 free(rd_ex_addr[i]); /* Not the place to create it */
/* Try to persist the repaired base block back to disk. */
283 code = vlwrite(trans, extentAddr, rd_ex_addr[0], VL_ADDREXTBLK_SIZE);
285 VLog(0, ("Multihome server support modification\n"));
287 /* Keep extent_mod true in-case the transaction aborts */
288 /* Don't return error so we don't abort transaction */
295 /* Check that the database has been initialized. Be careful to fail in a safe
296 manner, to avoid bogusly reinitializing the db. */
298 * reads in db cache from ubik.
300 * @param[in] ut ubik transaction
301 * @param[in] rock opaque pointer to an int*; if 1, we should rebuild the db
302 * if it appears empty, if 0 we should return an error if the
305 * @return operation status
309 UpdateCache(struct ubik_trans *trans, void *rock)
311 int *builddb_rock = rock;
312 int builddb = *builddb_rock;
313 afs_int32 error = 0, i, code, ubcode;
315 /* if version changed (or first call), read the header */
316 ubcode = vlread(trans, 0, (char *)&rd_cheader, sizeof(rd_cheader));
317 vldbversion = ntohl(rd_cheader.vital_header.vldbversion);
319 if (!ubcode && (vldbversion != 0)) {
320 memcpy(rd_HostAddress, rd_cheader.IpMappedAddr, sizeof(rd_cheader.IpMappedAddr));
321 for (i = 0; i < MAXSERVERID + 1; i++) { /* cvt HostAddress to host order */
322 rd_HostAddress[i] = ntohl(rd_HostAddress[i]);
325 code = readExtents(trans);
330 /* now, if can't read, or header is wrong, write a new header */
331 if (ubcode || vldbversion == 0) {
333 printf("Can't read VLDB header, re-initialising...\n");
335 /* try to write a good header */
336 memset(&rd_cheader, 0, sizeof(rd_cheader));
337 rd_cheader.vital_header.vldbversion = htonl(VLDBVERSION);
338 rd_cheader.vital_header.headersize = htonl(sizeof(rd_cheader));
339 /* DANGER: Must get this from a master place!! */
340 rd_cheader.vital_header.MaxVolumeId = htonl(0x20000000);
341 rd_cheader.vital_header.eofPtr = htonl(sizeof(rd_cheader));
342 for (i = 0; i < MAXSERVERID + 1; i++) {
343 rd_cheader.IpMappedAddr[i] = 0;
344 rd_HostAddress[i] = 0;
346 code = vlwrite(trans, 0, (char *)&rd_cheader, sizeof(rd_cheader));
348 printf("Can't write VLDB header (error = %d)\n", code);
351 vldbversion = ntohl(rd_cheader.vital_header.vldbversion);
353 ERROR_EXIT(VL_EMPTY);
357 if ((vldbversion != VLDBVERSION) && (vldbversion != OVLDBVERSION)
358 && (vldbversion != VLDBVERSION_4)) {
/* NOTE(review): the argument order (VLDBVERSION_4, VLDBVERSION,
 * OVLDBVERSION) does not obviously match the message's "(%d, %d or %d)"
 * phrasing — confirm which order is intended. */
360 ("VLDB version %d doesn't match this software version(%d, %d or %d), quitting!\n",
361 vldbversion, VLDBVERSION_4, VLDBVERSION, OVLDBVERSION);
362 return VL_BADVERSION;
/* Versions 3 and 4 use the new 13-server entry format; older use 8. */
365 maxnservers = ((vldbversion == 3 || vldbversion == 4) ? 13 : 8);
/* Ensure the in-memory cache is current via ubik_CheckCache/UpdateCache,
 * then re-validate the cached vldb version as a belt-and-braces check. */
373 CheckInit(struct ubik_trans *trans, int builddb)
377 code = ubik_CheckCache(trans, UpdateCache, &builddb);
382 /* these next two cases shouldn't happen (UpdateCache should either
383 * rebuild the db or return an error if these cases occur), but just to
384 * be on the safe side... */
385 if (vldbversion == 0) {
388 if ((vldbversion != VLDBVERSION) && (vldbversion != OVLDBVERSION)
389 && (vldbversion != VLDBVERSION_4)) {
390 return VL_BADVERSION;
/* Allocate and initialize extent block number `base`, appending it at the
 * database EOF and linking it from the header (base 0) or from the base
 * extent block's contaddrs (base > 0).
 * NOTE(review): `register` is an archaic storage class; harmless but dated. */
398 GetExtentBlock(struct vl_ctx *ctx, register afs_int32 base)
400 afs_int32 blockindex, code, error = 0;
402 /* Base 0 must exist before any other can be created */
403 if ((base != 0) && !ctx->ex_addr[0])
404 ERROR_EXIT(VL_CREATEFAIL); /* internal error */
406 if (!ctx->ex_addr[0] || !ctx->ex_addr[0]->ex_contaddrs[base]) {
407 /* Create a new extension block */
408 if (!ctx->ex_addr[base]) {
409 ctx->ex_addr[base] = (struct extentaddr *)malloc(VL_ADDREXTBLK_SIZE);
410 if (!ctx->ex_addr[base])
411 ERROR_EXIT(VL_NOMEM);
413 memset(ctx->ex_addr[base], 0, VL_ADDREXTBLK_SIZE);
415 /* Write the full extension block at end of vldb */
416 ctx->ex_addr[base]->ex_flags = htonl(VLCONTBLOCK);
417 blockindex = ntohl(ctx->cheader->vital_header.eofPtr);
419 vlwrite(ctx->trans, blockindex, (char *)ctx->ex_addr[base],
424 /* Update the cheader.vitalheader structure on disk */
425 ctx->cheader->vital_header.eofPtr = blockindex + VL_ADDREXTBLK_SIZE;
426 ctx->cheader->vital_header.eofPtr = htonl(ctx->cheader->vital_header.eofPtr);
427 code = write_vital_vlheader(ctx);
431 /* Write the address of the base extension block in the vldb header */
433 ctx->cheader->SIT = htonl(blockindex);
435 vlwrite(ctx->trans, DOFFSET(0, ctx->cheader, &ctx->cheader->SIT),
436 (char *)&ctx->cheader->SIT, sizeof(ctx->cheader->SIT));
441 /* Write the address of this extension block into the base extension block */
442 ctx->ex_addr[0]->ex_contaddrs[base] = htonl(blockindex);
444 vlwrite(ctx->trans, ntohl(ctx->cheader->SIT), ctx->ex_addr[0],
445 sizeof(struct extentaddr));
/* Look up (and optionally create) the multihomed-server extent entry for
 * `uuidp`.  On success *expp points at the entry and *basep is its extent
 * block number.  hostslot selects the IpMappedAddr slot (-1 = any free). */
456 FindExtentBlock(struct vl_ctx *ctx, afsUUID *uuidp,
457 afs_int32 createit, afs_int32 hostslot,
458 struct extentaddr **expp, afs_int32 *basep)
461 struct extentaddr *exp;
462 afs_int32 i, j, code, base, index, error = 0;
467 /* Create the first extension block if it does not exist */
468 if (!ctx->cheader->SIT) {
469 code = GetExtentBlock(ctx, 0);
/* Scan existing multihomed slots (top byte 0xff marks an mh reference:
 * 0xff....  base in bits 16-23, entry index in bits 0-15). */
474 for (i = 0; i < MAXSERVERID + 1; i++) {
475 if ((ctx->hostaddress[i] & 0xff000000) == 0xff000000) {
/* NOTE(review): these range checks use `>` where `>=` looks intended —
 * base indexes ex_addr[VL_MAX_ADDREXTBLKS] and index a block of
 * VL_MHSRV_PERBLK entries; TODO confirm against the on-disk format. */
476 if ((base = (ctx->hostaddress[i] >> 16) & 0xff) > VL_MAX_ADDREXTBLKS) {
477 ERROR_EXIT(VL_INDEXERANGE);
479 if ((index = ctx->hostaddress[i] & 0x0000ffff) > VL_MHSRV_PERBLK) {
480 ERROR_EXIT(VL_INDEXERANGE);
482 exp = &ctx->ex_addr[base][index];
483 tuuid = exp->ex_hostuuid;
484 afs_ntohuuid(&tuuid);
485 if (afs_uuid_equal(uuidp, &tuuid)) {
/* Not found; pick a free IpMappedAddr slot if the caller didn't fix one. */
494 if (hostslot == -1) {
495 for (i = 0; i < MAXSERVERID + 1; i++) {
496 if (!ctx->hostaddress[i])
500 ERROR_EXIT(VL_REPSFULL);
/* Find (or create) an extent block with a nil-uuid entry to claim. */
505 for (base = 0; base < VL_MAX_ADDREXTBLKS; base++) {
506 if (!ctx->ex_addr[0]->ex_contaddrs[base]) {
507 code = GetExtentBlock(ctx, base);
511 for (j = 1; j < VL_MHSRV_PERBLK; j++) {
512 exp = &ctx->ex_addr[base][j];
513 tuuid = exp->ex_hostuuid;
514 afs_ntohuuid(&tuuid);
515 if (afs_uuid_is_nil(&tuuid)) {
517 afs_htonuuid(&tuuid);
518 exp->ex_hostuuid = tuuid;
521 DOFFSET(ntohl(ctx->ex_addr[0]->ex_contaddrs[base]),
522 (char *)ctx->ex_addr[base], (char *)exp),
523 (char *)&tuuid, sizeof(tuuid));
/* Encode the mh reference into the host-address slot. */
526 ctx->hostaddress[i] =
527 0xff000000 | ((base << 16) & 0xff0000) | (j & 0xffff);
/* Creating an mh entry requires the version-4 database format. */
530 if (vldbversion != VLDBVERSION_4) {
531 ctx->cheader->vital_header.vldbversion =
532 htonl(VLDBVERSION_4);
533 code = write_vital_vlheader(ctx);
537 ctx->cheader->IpMappedAddr[i] = htonl(ctx->hostaddress[i]);
540 DOFFSET(0, ctx->cheader,
541 &ctx->cheader->IpMappedAddr[i]),
542 (char *)&ctx->cheader->IpMappedAddr[i],
550 ERROR_EXIT(VL_REPSFULL); /* No reason to utilize a new error code */
557 /* Allocate a free block of storage for entry, returning address of a new
558 zeroed entry (or zero if something is wrong). */
560 AllocBlock(struct vl_ctx *ctx, struct nvlentry *tentry)
562 afs_int32 blockindex;
/* Prefer recycling the head of the free list; otherwise grow the file. */
564 if (ctx->cheader->vital_header.freePtr) {
565 /* allocate this dude */
566 blockindex = ntohl(ctx->cheader->vital_header.freePtr);
/* Read the freed entry so its nextIdHash[0] (the free-list link) can
 * become the new free-list head. */
567 if (vlentryread(ctx->trans, blockindex, (char *)tentry, sizeof(vlentry)))
569 ctx->cheader->vital_header.freePtr = htonl(tentry->nextIdHash[0]);
571 /* hosed, nothing on free list, grow file */
572 blockindex = ntohl(ctx->cheader->vital_header.eofPtr); /* remember this guy */
573 ctx->cheader->vital_header.eofPtr = htonl(blockindex + sizeof(vlentry));
575 ctx->cheader->vital_header.allocs++;
576 if (write_vital_vlheader(ctx))
578 memset(tentry, 0, sizeof(nvlentry)); /* zero new entry */
583 /* Free a block given its index. It must already have been unthreaded. Returns zero for success or an error code on failure. */
585 FreeBlock(struct vl_ctx *ctx, afs_int32 blockindex)
587 struct nvlentry tentry;
589 /* check validity of blockindex just to be on the safe side */
590 if (!index_OK(ctx, blockindex))
/* Overwrite the block as a VLFREE entry whose nextIdHash[0] links to the
 * previous free-list head, then make this block the new head. */
592 memset(&tentry, 0, sizeof(nvlentry));
593 tentry.nextIdHash[0] = ctx->cheader->vital_header.freePtr; /* already in network order */
594 tentry.flags = htonl(VLFREE);
595 ctx->cheader->vital_header.freePtr = htonl(blockindex);
596 if (vlwrite(ctx->trans, blockindex, (char *)&tentry, sizeof(nvlentry)))
598 ctx->cheader->vital_header.frees++;
599 if (write_vital_vlheader(ctx))
605 /* Look for a block by volid and voltype (if not known use -1 which searches
606 * all 3 volid hash lists. Note that the linked lists are read in first from
607 * the database header. If found read the block's contents into the area
608 * pointed to by tentry and return the block's index. If not found return 0.
611 FindByID(struct vl_ctx *ctx, afs_uint32 volid, afs_int32 voltype,
612 struct nvlentry *tentry, afs_int32 *error)
614 afs_int32 typeindex, hashindex, blockindex;
617 hashindex = IDHash(volid);
619 /* Should we have one big hash table for volids as opposed to the three ones? */
/* voltype unknown: walk every per-type hash chain for this bucket. */
620 for (typeindex = 0; typeindex < MAXTYPES; typeindex++) {
621 for (blockindex = ntohl(ctx->cheader->VolidHash[typeindex][hashindex]);
623 blockindex = tentry->nextIdHash[typeindex]) {
625 (ctx->trans, blockindex, (char *)tentry, sizeof(nvlentry))) {
629 if (volid == tentry->volumeId[typeindex])
/* voltype known: walk just that type's chain. */
634 for (blockindex = ntohl(ctx->cheader->VolidHash[voltype][hashindex]);
635 blockindex != NULLO; blockindex = tentry->nextIdHash[voltype]) {
637 (ctx->trans, blockindex, (char *)tentry, sizeof(nvlentry))) {
641 if (volid == tentry->volumeId[voltype])
645 return 0; /* no such entry */
649 /* Look for a block by volume name. If found read the block's contents into
650 * the area pointed to by tentry and return the block's index. If not
654 FindByName(struct vl_ctx *ctx, char *volname, struct nvlentry *tentry,
658 afs_int32 blockindex;
659 char tname[VL_MAXNAMELEN];
661 /* remove .backup or .readonly extensions for stupid backwards
/* Only the base (RW) name is hashed, so strip the clone suffixes first.
 * NOTE(review): strcpy into tname assumes volname < VL_MAXNAMELEN —
 * presumably enforced by callers; verify. */
664 hashindex = strlen(volname); /* really string length */
665 if (hashindex >= 8 && strcmp(volname + hashindex - 7, ".backup") == 0) {
666 /* this is a backup volume */
667 strcpy(tname, volname);
668 tname[hashindex - 7] = 0; /* zap extension */
669 } else if (hashindex >= 10
670 && strcmp(volname + hashindex - 9, ".readonly") == 0) {
671 /* this is a readonly volume */
672 strcpy(tname, volname);
673 tname[hashindex - 9] = 0; /* zap extension */
675 strcpy(tname, volname);
678 hashindex = NameHash(tname);
679 for (blockindex = ntohl(ctx->cheader->VolnameHash[hashindex]);
680 blockindex != NULLO; blockindex = tentry->nextNameHash) {
681 if (vlentryread(ctx->trans, blockindex, (char *)tentry, sizeof(nvlentry))) {
685 if (!strcmp(tname, tentry->name))
688 return 0; /* no such entry */
692 * Returns whether or not any of the supplied volume IDs already exist
695 * @param ctx transaction context
696 * @param ids an array of volume IDs
697 * @param ids_len the number of elements in the 'ids' array
698 * @param error filled in with an error code in case of error
700 * @return whether any of the volume IDs are already used
701 * @retval 1 at least one of the volume IDs is already used
702 * @retval 0 none of the volume IDs are used, or an error occurred
705 EntryIDExists(struct vl_ctx *ctx, const afs_uint32 *ids,
706 afs_int32 ids_len, afs_int32 *error)
709 struct nvlentry tentry;
/* Probe each id against all three hash chains (voltype -1). */
713 for (typeindex = 0; typeindex < ids_len; typeindex++) {
715 && FindByID(ctx, ids[typeindex], -1, &tentry, error)) {
727 * Finds the next range of unused volume IDs in the vldb.
729 * @param ctx transaction context
730 * @param maxvolid the current max vol ID, and where to start looking
731 * for an unused volume ID range
732 * @param bump how many volume IDs we need to be unused
733 * @param error filled in with an error code in case of error
735 * @return the next volume ID 'volid' such that the range
736 * [volid, volid+bump) of volume IDs is unused, or 0 if there's
740 NextUnusedID(struct vl_ctx *ctx, afs_uint32 maxvolid, afs_uint32 bump,
743 struct nvlentry tentry;
749 /* we simply start at the given maxvolid, keep a running tally of
750 * how many free volume IDs we've seen in a row, and return when
751 * we've seen 'bump' unused IDs in a row */
752 for (id = maxvolid, nfree = 0; nfree < bump; ++id) {
753 if (FindByID(ctx, id, -1, &tentry, error)) {
762 /* 'id' is now at the end of the [maxvolid,maxvolid+bump) range,
763 * but we need to return the first unused id, so subtract the
764 * number of current running free IDs to get the beginning */
/* Debug helper: walk one volume-name hash chain and log each entry. */
769 HashNDump(struct vl_ctx *ctx, int hashindex)
773 struct nvlentry tentry;
775 for (blockindex = ntohl(ctx->cheader->VolnameHash[hashindex]);
776 blockindex != NULLO; blockindex = tentry.nextNameHash) {
777 if (vlentryread(ctx->trans, blockindex, (char *)&tentry, sizeof(nvlentry)))
781 ("[%d]#%d: %10d %d %d (%s)\n", hashindex, i, tentry.volumeId[0],
782 tentry.nextIdHash[0], tentry.nextNameHash, tentry.name));
/* Debug helper: walk one RW volume-id hash chain and log each entry. */
789 HashIdDump(struct vl_ctx *ctx, int hashindex)
793 struct nvlentry tentry;
795 for (blockindex = ntohl(ctx->cheader->VolidHash[0][hashindex]);
796 blockindex != NULLO; blockindex = tentry.nextIdHash[0]) {
797 if (vlentryread(ctx->trans, blockindex, (char *)&tentry, sizeof(nvlentry)))
801 ("[%d]#%d: %10d %d %d (%s)\n", hashindex, i, tentry.volumeId[0],
802 tentry.nextIdHash[0], tentry.nextNameHash, tentry.name));
808 /* Add a block to the hash table given a pointer to the block and its index.
809 * The block is threaded onto both hash tables and written to disk. The
810 * routine returns zero if there were no errors.
813 ThreadVLentry(struct vl_ctx *ctx, afs_int32 blockindex,
814 struct nvlentry *tentry)
818 if (!index_OK(ctx, blockindex))
820 /* Insert into volid's hash linked list */
821 if ((errorcode = HashVolid(ctx, RWVOL, blockindex, tentry)))
824 /* For rw entries we also enter the RO and BACK volume ids (if they
825 * exist) in the hash tables; note all there volids (RW, RO, BACK)
826 * should not be hashed yet! */
827 if (tentry->volumeId[ROVOL]) {
828 if ((errorcode = HashVolid(ctx, ROVOL, blockindex, tentry)))
831 if (tentry->volumeId[BACKVOL]) {
832 if ((errorcode = HashVolid(ctx, BACKVOL, blockindex, tentry)))
836 /* Insert into volname's hash linked list */
/* NOTE(review): HashVolname's return value is ignored here, unlike the
 * HashVolid calls above — confirm whether that is intentional. */
837 HashVolname(ctx, blockindex, tentry);
839 /* Update cheader entry */
840 if (write_vital_vlheader(ctx))
843 /* Update hash list pointers in the entry itself */
844 if (vlentrywrite(ctx->trans, blockindex, (char *)tentry, sizeof(nvlentry)))
850 /* Remove a block from both the hash tables. If success return 0, else
851 * return an error code. */
853 UnthreadVLentry(struct vl_ctx *ctx, afs_int32 blockindex,
854 struct nvlentry *aentry)
856 afs_int32 errorcode, typeindex;
858 if (!index_OK(ctx, blockindex))
860 if ((errorcode = UnhashVolid(ctx, RWVOL, blockindex, aentry)))
863 /* Take the RO/RW entries of their respective hash linked lists. */
864 for (typeindex = ROVOL; typeindex <= BACKVOL; typeindex++) {
865 if ((errorcode = UnhashVolid(ctx, typeindex, blockindex, aentry)))
869 /* Take it out of the Volname hash list */
870 if ((errorcode = UnhashVolname(ctx, blockindex, aentry)))
873 /* Update cheader entry */
/* NOTE(review): write_vital_vlheader's return value is discarded here,
 * unlike in ThreadVLentry — confirm whether that is intentional. */
874 write_vital_vlheader(ctx);
879 /* cheader must have be read before this routine is called. */
/* Prepend blockindex onto the voltype hash chain for its volume id, and
 * persist the updated chain head in the on-disk header. */
881 HashVolid(struct vl_ctx *ctx, afs_int32 voltype, afs_int32 blockindex,
882 struct nvlentry *tentry)
884 afs_int32 hashindex, errorcode;
885 struct nvlentry ventry;
/* Refuse to hash an id that is already present in some chain. */
888 (ctx, tentry->volumeId[voltype], voltype, &ventry, &errorcode)
889 return VL_IDALREADYHASHED;
892 hashindex = IDHash(tentry->volumeId[voltype]);
893 tentry->nextIdHash[voltype] =
894 ntohl(ctx->cheader->VolidHash[voltype][hashindex]);
895 ctx->cheader->VolidHash[voltype][hashindex] = htonl(blockindex);
897 (ctx->trans, DOFFSET(0, ctx->cheader, &ctx->cheader->VolidHash[voltype][hashindex]),
898 (char *)&ctx->cheader->VolidHash[voltype][hashindex], sizeof(afs_int32))
904 /* cheader must have be read before this routine is called. */
/* Unlink blockindex from the voltype volume-id hash chain: either repoint
 * the header's chain head or patch the predecessor entry on disk. */
906 UnhashVolid(struct vl_ctx *ctx, afs_int32 voltype, afs_int32 blockindex,
907 struct nvlentry *aentry)
909 int hashindex, nextblockindex, prevblockindex;
910 struct nvlentry tentry;
914 if (aentry->volumeId[voltype] == NULLO) /* Assume no volume id */
916 /* Take it out of the VolId[voltype] hash list */
917 hashindex = IDHash(aentry->volumeId[voltype]);
918 nextblockindex = ntohl(ctx->cheader->VolidHash[voltype][hashindex]);
919 if (nextblockindex == blockindex) {
920 /* First on the hash list; just adjust pointers */
921 ctx->cheader->VolidHash[voltype][hashindex] =
922 htonl(aentry->nextIdHash[voltype]);
925 DOFFSET(0, ctx->cheader,
926 &ctx->cheader->VolidHash[voltype][hashindex]),
927 (char *)&ctx->cheader->VolidHash[voltype][hashindex],
/* Not the head: walk the chain to find the predecessor. */
932 while (nextblockindex != blockindex) {
933 prevblockindex = nextblockindex; /* always done once */
935 (ctx->trans, nextblockindex, (char *)&tentry, sizeof(nvlentry))
937 if ((nextblockindex = tentry.nextIdHash[voltype]) == NULLO)
/* Splice our successor into the predecessor's link on disk. */
940 temp = tentry.nextIdHash[voltype] = aentry->nextIdHash[voltype];
941 temp = htonl(temp); /* convert to network byte order before writing */
944 DOFFSET(prevblockindex, &tentry, &tentry.nextIdHash[voltype]),
945 (char *)&temp, sizeof(afs_int32))
948 aentry->nextIdHash[voltype] = 0;
/* Prepend blockindex onto the volume-name hash chain and persist the new
 * chain head in the on-disk header. */
954 HashVolname(struct vl_ctx *ctx, afs_int32 blockindex,
955 struct nvlentry *aentry)
960 /* Insert into volname's hash linked list */
961 hashindex = NameHash(aentry->name);
962 aentry->nextNameHash = ntohl(ctx->cheader->VolnameHash[hashindex]);
963 ctx->cheader->VolnameHash[hashindex] = htonl(blockindex);
965 vlwrite(ctx->trans, DOFFSET(0, ctx->cheader, &ctx->cheader->VolnameHash[hashindex]),
966 (char *)&ctx->cheader->VolnameHash[hashindex], sizeof(afs_int32));
/* Unlink blockindex from the volume-name hash chain: repoint the header's
 * chain head, or patch the predecessor entry's nextNameHash on disk. */
974 UnhashVolname(struct vl_ctx *ctx, afs_int32 blockindex,
975 struct nvlentry *aentry)
977 afs_int32 hashindex, nextblockindex, prevblockindex;
978 struct nvlentry tentry;
981 /* Take it out of the Volname hash list */
982 hashindex = NameHash(aentry->name);
983 nextblockindex = ntohl(ctx->cheader->VolnameHash[hashindex]);
984 if (nextblockindex == blockindex) {
985 /* First on the hash list; just adjust pointers */
986 ctx->cheader->VolnameHash[hashindex] = htonl(aentry->nextNameHash);
988 (ctx->trans, DOFFSET(0, ctx->cheader, &ctx->cheader->VolnameHash[hashindex]),
989 (char *)&ctx->cheader->VolnameHash[hashindex], sizeof(afs_int32))
/* Not the head: walk the chain to find the predecessor. */
992 while (nextblockindex != blockindex) {
993 prevblockindex = nextblockindex; /* always done at least once */
995 (ctx->trans, nextblockindex, (char *)&tentry, sizeof(nvlentry))
997 if ((nextblockindex = tentry.nextNameHash) == NULLO)
1000 tentry.nextNameHash = aentry->nextNameHash;
1001 temp = htonl(tentry.nextNameHash);
1003 (ctx->trans, DOFFSET(prevblockindex, &tentry, &tentry.nextNameHash),
1004 (char *)&temp, sizeof(afs_int32))
1007 aentry->nextNameHash = 0;
1012 /* Returns the vldb entry tentry at offset index; remaining is the number of
1013 * entries left; the routine also returns the index of the next sequential
1018 NextEntry(struct vl_ctx *ctx, afs_int32 blockindex,
1019 struct nvlentry *tentry, afs_int32 *remaining)
1021 afs_int32 lastblockindex;
1023 if (blockindex == 0) /* get first one */
1024 blockindex = sizeof(*ctx->cheader);
1026 if (!index_OK(ctx, blockindex)) {
1027 *remaining = -1; /* error */
1030 blockindex += sizeof(nvlentry);
1032 /* now search for the first entry that isn't free */
1033 for (lastblockindex = ntohl(ctx->cheader->vital_header.eofPtr);
1034 blockindex < lastblockindex;) {
1035 if (vlentryread(ctx->trans, blockindex, (char *)tentry, sizeof(nvlentry))) {
1039 if (tentry->flags == VLCONTBLOCK) {
1041 * This is a special mh extension block just simply skip over it
1043 blockindex += VL_ADDREXTBLK_SIZE;
1045 if (tentry->flags != VLFREE) {
1046 /* estimate remaining number of entries, not including this one */
1048 (lastblockindex - blockindex) / sizeof(nvlentry) - 1;
1051 blockindex += sizeof(nvlentry);
1054 *remaining = 0; /* no more entries */
1059 /* Routine to verify that index is a legal offset to a vldb entry in the
/* Legal entry offsets lie between the end of the header and the EOF
 * pointer (exclusive). */
1063 index_OK(struct vl_ctx *ctx, afs_int32 blockindex)
1065 if ((blockindex < sizeof(*ctx->cheader))
1066 || (blockindex >= ntohl(ctx->cheader->vital_header.eofPtr)))
1071 /* makes a deep copy of src_ex into dst_ex */
/* For each extent slot, copy the source block (allocating the destination
 * buffer on demand); slots with no source are handled below (truncated in
 * this excerpt). */
1073 vlexcpy(struct extentaddr **dst_ex, struct extentaddr **src_ex)
1076 for (i = 0; i < VL_MAX_ADDREXTBLKS; i++) {
1079 dst_ex[i] = malloc(VL_ADDREXTBLK_SIZE);
1084 memcpy(dst_ex[i], src_ex[i], VL_ADDREXTBLK_SIZE);
1086 } else if (dst_ex[i]) {
1087 /* we have no src, but we have a dst... meaning, this block
/* Point the transaction context at the appropriate cache set: the shared
 * rd_* copies for read locks, or a fresh wr_* snapshot (copied from the
 * rd_* state, including a deep extent-block copy) for write locks. */
1097 vlsetcache(struct vl_ctx *ctx, int locktype)
1099 if (locktype == LOCKREAD) {
1100 ctx->hostaddress = rd_HostAddress;
1101 ctx->ex_addr = rd_ex_addr;
1102 ctx->cheader = &rd_cheader;
/* Write lock: snapshot the committed state into the write-side copies. */
1105 memcpy(wr_HostAddress, rd_HostAddress, sizeof(wr_HostAddress));
1106 memcpy(&wr_cheader, &rd_cheader, sizeof(wr_cheader));
1108 ctx->hostaddress = wr_HostAddress;
1109 ctx->ex_addr = wr_ex_addr;
1110 ctx->cheader = &wr_cheader;
1112 return vlexcpy(wr_ex_addr, rd_ex_addr);
1119 memcpy(rd_HostAddress, wr_HostAddress, sizeof(rd_HostAddress));
1120 memcpy(&rd_cheader, &wr_cheader, sizeof(rd_cheader));
1121 return vlexcpy(rd_ex_addr, wr_ex_addr);