/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License. For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

#include <afsconfig.h>
#include <afs/param.h>

#include <roken.h>

#include <lock.h>
#include <rx/xdr.h>
#include <ubik.h>

#include "vlserver.h"
#include "vlserver_internal.h"

struct vlheader xheader;
extern int maxnservers;
extern afs_uint32 rd_HostAddress[MAXSERVERID + 1];
extern afs_uint32 wr_HostAddress[MAXSERVERID + 1];
struct extentaddr *rd_ex_addr[VL_MAX_ADDREXTBLKS] = { 0, 0, 0, 0 };
struct extentaddr *wr_ex_addr[VL_MAX_ADDREXTBLKS] = { 0, 0, 0, 0 };
struct vlheader rd_cheader;     /* kept in network byte order */
struct vlheader wr_cheader;
int vldbversion = 0;

static int index_OK(struct vl_ctx *ctx, afs_int32 blockindex);

#define ERROR_EXIT(code) do { \
    error = (code); \
    goto error_exit; \
} while (0)

/* Hashing algorithm based on the volume id; HASHSIZE must be prime */
afs_int32
IDHash(afs_int32 volumeid)
{
    return ((abs(volumeid)) % HASHSIZE);
}


/* Hashing algorithm based on the volume name; the name's size is implicit
 * (64 chars) and, if changed, should be reflected here. */
afs_int32
NameHash(char *volumename)
{
    unsigned int hash;
    int i;

    hash = 0;
    for (i = strlen(volumename), volumename += i - 1; i--; volumename--)
        hash = (hash * 63) + (*((unsigned char *)volumename) - 63);
    return (hash % HASHSIZE);
}
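
/*
 * Illustrative note (not part of the original source): both hash routines
 * return a bucket index into the corresponding chain-head array in the VLDB
 * header, e.g.
 *
 *     bucket = NameHash(volname);
 *     blockindex = ntohl(ctx->cheader->VolnameHash[bucket]);
 *
 * after which the chain is walked through each entry's nextNameHash field
 * (see FindByName below).  The variable names here are hypothetical.
 */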


/* package up seek and write into one procedure for ease of use */
afs_int32
vlwrite(struct ubik_trans *trans, afs_int32 offset, void *buffer,
        afs_int32 length)
{
    afs_int32 errorcode;

    if ((errorcode = ubik_Seek(trans, 0, offset)))
        return errorcode;
    return (ubik_Write(trans, buffer, length));
}


/* Package up seek and read into one procedure for ease of use */
afs_int32
vlread(struct ubik_trans *trans, afs_int32 offset, char *buffer,
       afs_int32 length)
{
    afs_int32 errorcode;

    if ((errorcode = ubik_Seek(trans, 0, offset)))
        return errorcode;
    return (ubik_Read(trans, buffer, length));
}


/* take entry and convert to network order and write to disk */
afs_int32
vlentrywrite(struct ubik_trans *trans, afs_int32 offset, void *buffer,
             afs_int32 length)
{
    struct vlentry oentry;
    struct nvlentry nentry, *nep;
    char *bufp;
    afs_int32 i;

    if (length != sizeof(oentry))
        return -1;
    if (maxnservers == 13) {
        nep = (struct nvlentry *)buffer;
        for (i = 0; i < MAXTYPES; i++)
            nentry.volumeId[i] = htonl(nep->volumeId[i]);
        nentry.flags = htonl(nep->flags);
        nentry.LockAfsId = htonl(nep->LockAfsId);
        nentry.LockTimestamp = htonl(nep->LockTimestamp);
        nentry.cloneId = htonl(nep->cloneId);
        for (i = 0; i < MAXTYPES; i++)
            nentry.nextIdHash[i] = htonl(nep->nextIdHash[i]);
        nentry.nextNameHash = htonl(nep->nextNameHash);
        memcpy(nentry.name, nep->name, VL_MAXNAMELEN);
        memcpy(nentry.serverNumber, nep->serverNumber, NMAXNSERVERS);
        memcpy(nentry.serverPartition, nep->serverPartition, NMAXNSERVERS);
        memcpy(nentry.serverFlags, nep->serverFlags, NMAXNSERVERS);
        bufp = (char *)&nentry;
    } else {
        memset(&oentry, 0, sizeof(struct vlentry));
        nep = (struct nvlentry *)buffer;
        for (i = 0; i < MAXTYPES; i++)
            oentry.volumeId[i] = htonl(nep->volumeId[i]);
        oentry.flags = htonl(nep->flags);
        oentry.LockAfsId = htonl(nep->LockAfsId);
        oentry.LockTimestamp = htonl(nep->LockTimestamp);
        oentry.cloneId = htonl(nep->cloneId);
        for (i = 0; i < MAXTYPES; i++)
            oentry.nextIdHash[i] = htonl(nep->nextIdHash[i]);
        oentry.nextNameHash = htonl(nep->nextNameHash);
        memcpy(oentry.name, nep->name, VL_MAXNAMELEN);
        memcpy(oentry.serverNumber, nep->serverNumber, OMAXNSERVERS);
        memcpy(oentry.serverPartition, nep->serverPartition, OMAXNSERVERS);
        memcpy(oentry.serverFlags, nep->serverFlags, OMAXNSERVERS);
        bufp = (char *)&oentry;
    }
    return vlwrite(trans, offset, bufp, length);
}

/* read entry from disk and convert to host order */
afs_int32
vlentryread(struct ubik_trans *trans, afs_int32 offset, char *buffer,
            afs_int32 length)
{
    struct vlentry *oep, tentry;
    struct nvlentry *nep, *nbufp;
    char *bufp = (char *)&tentry;
    afs_int32 i;

    if (length != sizeof(vlentry))
        return -1;
    i = vlread(trans, offset, bufp, length);
    if (i)
        return i;
    if (maxnservers == 13) {
        nep = (struct nvlentry *)bufp;
        nbufp = (struct nvlentry *)buffer;
        for (i = 0; i < MAXTYPES; i++)
            nbufp->volumeId[i] = ntohl(nep->volumeId[i]);
        nbufp->flags = ntohl(nep->flags);
        nbufp->LockAfsId = ntohl(nep->LockAfsId);
        nbufp->LockTimestamp = ntohl(nep->LockTimestamp);
        nbufp->cloneId = ntohl(nep->cloneId);
        for (i = 0; i < MAXTYPES; i++)
            nbufp->nextIdHash[i] = ntohl(nep->nextIdHash[i]);
        nbufp->nextNameHash = ntohl(nep->nextNameHash);
        memcpy(nbufp->name, nep->name, VL_MAXNAMELEN);
        memcpy(nbufp->serverNumber, nep->serverNumber, NMAXNSERVERS);
        memcpy(nbufp->serverPartition, nep->serverPartition, NMAXNSERVERS);
        memcpy(nbufp->serverFlags, nep->serverFlags, NMAXNSERVERS);
    } else {
        oep = (struct vlentry *)bufp;
        nbufp = (struct nvlentry *)buffer;
        memset(nbufp, 0, sizeof(struct nvlentry));
        for (i = 0; i < MAXTYPES; i++)
            nbufp->volumeId[i] = ntohl(oep->volumeId[i]);
        nbufp->flags = ntohl(oep->flags);
        nbufp->LockAfsId = ntohl(oep->LockAfsId);
        nbufp->LockTimestamp = ntohl(oep->LockTimestamp);
        nbufp->cloneId = ntohl(oep->cloneId);
        for (i = 0; i < MAXTYPES; i++)
            nbufp->nextIdHash[i] = ntohl(oep->nextIdHash[i]);
        nbufp->nextNameHash = ntohl(oep->nextNameHash);
        memcpy(nbufp->name, oep->name, VL_MAXNAMELEN);
        memcpy(nbufp->serverNumber, oep->serverNumber, NMAXNSERVERS);
        memcpy(nbufp->serverPartition, oep->serverPartition, NMAXNSERVERS);
        memcpy(nbufp->serverFlags, oep->serverFlags, NMAXNSERVERS);
    }
    return 0;
}

/* Convenient write of small critical vldb header info to the database. */
int
write_vital_vlheader(struct vl_ctx *ctx)
{
    if (vlwrite
        (ctx->trans, 0, (char *)&ctx->cheader->vital_header,
         sizeof(vital_vlheader)))
        return VL_IO;
    return 0;
}


int extent_mod = 0;

/* This routine reads in the extent blocks for multi-homed servers.
 * There used to be an initialization bug that would cause the contaddrs
 * pointers in the first extent block to be bad. Here we will check the
 * pointers and zero them in the in-memory copy if we find them bad. We
 * also try to write the extent blocks back out. If we can't, then we
 * will wait until the next write transaction to write them out
 * (extent_mod tells us the on-disk copy is bad).
 */
afs_int32
readExtents(struct ubik_trans *trans)
{
    afs_uint32 extentAddr;
    afs_int32 error = 0, code;
    int i;

    extent_mod = 0;
    extentAddr = ntohl(rd_cheader.SIT);
    if (!extentAddr)
        return 0;

    /* Read the first extension block */
    if (!rd_ex_addr[0]) {
        rd_ex_addr[0] = malloc(VL_ADDREXTBLK_SIZE);
        if (!rd_ex_addr[0])
            ERROR_EXIT(VL_NOMEM);
    }
    code = vlread(trans, extentAddr, (char *)rd_ex_addr[0], VL_ADDREXTBLK_SIZE);
    if (code) {
        free(rd_ex_addr[0]);    /* Not the place to create it */
        rd_ex_addr[0] = 0;
        ERROR_EXIT(VL_IO);
    }

    /* In case more than 64 mh servers are in use, they're kept in these
     * continuation blocks
     */
    for (i = 1; i < VL_MAX_ADDREXTBLKS; i++) {
        if (!rd_ex_addr[0]->ex_contaddrs[i])
            continue;

        /* Before reading it in, check to see if the address is good */
        if ((ntohl(rd_ex_addr[0]->ex_contaddrs[i]) <
             ntohl(rd_ex_addr[0]->ex_contaddrs[i - 1]) + VL_ADDREXTBLK_SIZE)
            || (ntohl(rd_ex_addr[0]->ex_contaddrs[i]) >
                ntohl(rd_cheader.vital_header.eofPtr) - VL_ADDREXTBLK_SIZE)) {
            extent_mod = 1;
            rd_ex_addr[0]->ex_contaddrs[i] = 0;
            continue;
        }

        /* Read the continuation block */
        if (!rd_ex_addr[i]) {
            rd_ex_addr[i] = malloc(VL_ADDREXTBLK_SIZE);
            if (!rd_ex_addr[i])
                ERROR_EXIT(VL_NOMEM);
        }
        code =
            vlread(trans, ntohl(rd_ex_addr[0]->ex_contaddrs[i]),
                   (char *)rd_ex_addr[i], VL_ADDREXTBLK_SIZE);
        if (code) {
            free(rd_ex_addr[i]);        /* Not the place to create it */
            rd_ex_addr[i] = 0;
            ERROR_EXIT(VL_IO);
        }

        /* After reading it in, check to see if it's a real continuation block */
        if (ntohl(rd_ex_addr[i]->ex_hdrflags) != VLCONTBLOCK) {
            extent_mod = 1;
            rd_ex_addr[0]->ex_contaddrs[i] = 0;
            free(rd_ex_addr[i]);        /* Not the place to create it */
            rd_ex_addr[i] = 0;
            continue;
        }
    }

    if (extent_mod) {
        code = vlwrite(trans, extentAddr, rd_ex_addr[0], VL_ADDREXTBLK_SIZE);
        if (!code) {
            VLog(0, ("Multihome server support modification\n"));
        }
        /* Keep extent_mod true in case the transaction aborts */
        /* Don't return an error so we don't abort the transaction */
    }

  error_exit:
    return error;
}

/* Check that the database has been initialized. Be careful to fail in a safe
   manner, to avoid bogusly reinitializing the db. */
/**
 * reads in db cache from ubik.
 *
 * @param[in] trans  ubik transaction
 * @param[in] rock   opaque pointer to an int; if 1, we should rebuild the db
 *                   if it appears empty, if 0 we should return an error if
 *                   the db appears empty
 *
 * @return operation status
 *   @retval 0 success
 */
static afs_int32
UpdateCache(struct ubik_trans *trans, void *rock)
{
    int *builddb_rock = rock;
    int builddb = *builddb_rock;
    afs_int32 error = 0, i, code, ubcode;

    /* if version changed (or first call), read the header */
    ubcode = vlread(trans, 0, (char *)&rd_cheader, sizeof(rd_cheader));
    vldbversion = ntohl(rd_cheader.vital_header.vldbversion);

    if (!ubcode && (vldbversion != 0)) {
        memcpy(rd_HostAddress, rd_cheader.IpMappedAddr,
               sizeof(rd_cheader.IpMappedAddr));
        for (i = 0; i < MAXSERVERID + 1; i++) {  /* cvt HostAddress to host order */
            rd_HostAddress[i] = ntohl(rd_HostAddress[i]);
        }

        code = readExtents(trans);
        if (code)
            ERROR_EXIT(code);
    }

    /* now, if we can't read the header, or it is wrong, write a new one */
    if (ubcode || vldbversion == 0) {
        if (builddb) {
            VLog(0, ("Can't read VLDB header, re-initialising...\n"));

            /* try to write a good header */
            /* The read cache will be sync'ed to this new header
             * when the ubik transaction is ended by vlsynccache(). */
            memset(&wr_cheader, 0, sizeof(wr_cheader));
            wr_cheader.vital_header.vldbversion = htonl(VLDBVERSION);
            wr_cheader.vital_header.headersize = htonl(sizeof(wr_cheader));
            /* DANGER: Must get this from a master place!! */
            wr_cheader.vital_header.MaxVolumeId = htonl(0x20000000);
            wr_cheader.vital_header.eofPtr = htonl(sizeof(wr_cheader));
            for (i = 0; i < MAXSERVERID + 1; i++) {
                wr_cheader.IpMappedAddr[i] = 0;
                wr_HostAddress[i] = 0;
            }
            code = vlwrite(trans, 0, (char *)&wr_cheader, sizeof(wr_cheader));
            if (code) {
                VLog(0, ("Can't write VLDB header (error = %d)\n", code));
                ERROR_EXIT(VL_IO);
            }
            vldbversion = ntohl(wr_cheader.vital_header.vldbversion);
        } else {
            VLog(1, ("Unable to read VLDB header.\n"));
            ERROR_EXIT(VL_EMPTY);
        }
    }

    if ((vldbversion != VLDBVERSION) && (vldbversion != OVLDBVERSION)
        && (vldbversion != VLDBVERSION_4)) {
        VLog(0,
             ("VLDB version %d doesn't match this software version(%d, %d or %d), quitting!\n",
              vldbversion, VLDBVERSION_4, VLDBVERSION, OVLDBVERSION));
        ERROR_EXIT(VL_BADVERSION);
    }

    maxnservers = ((vldbversion == 3 || vldbversion == 4) ? 13 : 8);

  error_exit:
    /* all done */
    return error;
}

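/*
 * Descriptive comment (added; not in the original source): ensure the VLDB
 * header cache has been read in for this transaction and that the database
 * version is one this code understands.  builddb controls whether an
 * apparently empty database may be (re)initialized by UpdateCache.
 */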
afs_int32
CheckInit(struct ubik_trans *trans, int builddb)
{
    afs_int32 code;

    code = ubik_CheckCache(trans, UpdateCache, &builddb);
    if (code) {
        return code;
    }

    /* these next two cases shouldn't happen (UpdateCache should either
     * rebuild the db or return an error if these cases occur), but just to
     * be on the safe side... */
    if (vldbversion == 0) {
        return VL_EMPTY;
    }
    if ((vldbversion != VLDBVERSION) && (vldbversion != OVLDBVERSION)
        && (vldbversion != VLDBVERSION_4)) {
        return VL_BADVERSION;
    }

    return 0;
}


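/*
 * Descriptive comment (added; not in the original source): create the
 * multihomed extent block numbered 'base' if it does not already exist,
 * append it at the end of the database, and record its address in the VLDB
 * header (for base 0) and in the first extent block's ex_contaddrs table.
 */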
afs_int32
GetExtentBlock(struct vl_ctx *ctx, register afs_int32 base)
{
    afs_int32 blockindex, code, error = 0;

    /* Base 0 must exist before any other can be created */
    if ((base != 0) && !ctx->ex_addr[0])
        ERROR_EXIT(VL_CREATEFAIL);      /* internal error */

    if (!ctx->ex_addr[0] || !ctx->ex_addr[0]->ex_contaddrs[base]) {
        /* Create a new extension block */
        if (!ctx->ex_addr[base]) {
            ctx->ex_addr[base] = malloc(VL_ADDREXTBLK_SIZE);
            if (!ctx->ex_addr[base])
                ERROR_EXIT(VL_NOMEM);
        }
        memset(ctx->ex_addr[base], 0, VL_ADDREXTBLK_SIZE);

        /* Write the full extension block at end of vldb */
        ctx->ex_addr[base]->ex_hdrflags = htonl(VLCONTBLOCK);
        blockindex = ntohl(ctx->cheader->vital_header.eofPtr);
        code =
            vlwrite(ctx->trans, blockindex, (char *)ctx->ex_addr[base],
                    VL_ADDREXTBLK_SIZE);
        if (code)
            ERROR_EXIT(VL_IO);

        /* Update the cheader.vitalheader structure on disk */
        ctx->cheader->vital_header.eofPtr = blockindex + VL_ADDREXTBLK_SIZE;
        ctx->cheader->vital_header.eofPtr =
            htonl(ctx->cheader->vital_header.eofPtr);
        code = write_vital_vlheader(ctx);
        if (code)
            ERROR_EXIT(VL_IO);

        /* Write the address of the base extension block in the vldb header */
        if (base == 0) {
            ctx->cheader->SIT = htonl(blockindex);
            code =
                vlwrite(ctx->trans, DOFFSET(0, ctx->cheader, &ctx->cheader->SIT),
                        (char *)&ctx->cheader->SIT, sizeof(ctx->cheader->SIT));
            if (code)
                ERROR_EXIT(VL_IO);
        }

        /* Write the address of this extension block into the base extension block */
        ctx->ex_addr[0]->ex_contaddrs[base] = htonl(blockindex);
        code =
            vlwrite(ctx->trans, ntohl(ctx->cheader->SIT), ctx->ex_addr[0],
                    sizeof(struct extentaddr));
        if (code)
            ERROR_EXIT(VL_IO);
    }

  error_exit:
    return error;
}


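/*
 * Descriptive comment (added; not in the original source): look up the
 * multihomed extent entry whose uuid matches *uuidp, returning a pointer to
 * it and its base block number.  If createit is set and no match exists,
 * allocate a free slot (host slot 'hostslot', or the first free slot when
 * hostslot is -1) and record it in IpMappedAddr.
 */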
afs_int32
FindExtentBlock(struct vl_ctx *ctx, afsUUID *uuidp,
                afs_int32 createit, afs_int32 hostslot,
                struct extentaddr **expp, afs_int32 *basep)
{
    afsUUID tuuid;
    struct extentaddr *exp;
    afs_int32 i, j, code, base, index, error = 0;

    *expp = NULL;
    *basep = 0;

    /* Create the first extension block if it does not exist */
    if (!ctx->cheader->SIT) {
        code = GetExtentBlock(ctx, 0);
        if (code)
            ERROR_EXIT(code);
    }

    for (i = 0; i < MAXSERVERID + 1; i++) {
        if ((ctx->hostaddress[i] & 0xff000000) == 0xff000000) {
            if ((base = (ctx->hostaddress[i] >> 16) & 0xff) > VL_MAX_ADDREXTBLKS) {
                ERROR_EXIT(VL_INDEXERANGE);
            }
            if ((index = ctx->hostaddress[i] & 0x0000ffff) > VL_MHSRV_PERBLK) {
                ERROR_EXIT(VL_INDEXERANGE);
            }
            exp = &ctx->ex_addr[base][index];
            tuuid = exp->ex_hostuuid;
            afs_ntohuuid(&tuuid);
            if (afs_uuid_equal(uuidp, &tuuid)) {
                *expp = exp;
                *basep = base;
                ERROR_EXIT(0);
            }
        }
    }

    if (createit) {
        if (hostslot == -1) {
            for (i = 0; i < MAXSERVERID + 1; i++) {
                if (!ctx->hostaddress[i])
                    break;
            }
            if (i > MAXSERVERID)
                ERROR_EXIT(VL_REPSFULL);
        } else {
            i = hostslot;
        }

        for (base = 0; base < VL_MAX_ADDREXTBLKS; base++) {
            if (!ctx->ex_addr[0]->ex_contaddrs[base]) {
                code = GetExtentBlock(ctx, base);
                if (code)
                    ERROR_EXIT(code);
            }
            for (j = 1; j < VL_MHSRV_PERBLK; j++) {
                exp = &ctx->ex_addr[base][j];
                tuuid = exp->ex_hostuuid;
                afs_ntohuuid(&tuuid);
                if (afs_uuid_is_nil(&tuuid)) {
                    tuuid = *uuidp;
                    afs_htonuuid(&tuuid);
                    exp->ex_hostuuid = tuuid;
                    code =
                        vlwrite(ctx->trans,
                                DOFFSET(ntohl(ctx->ex_addr[0]->ex_contaddrs[base]),
                                        (char *)ctx->ex_addr[base], (char *)exp),
                                (char *)&tuuid, sizeof(tuuid));
                    if (code)
                        ERROR_EXIT(VL_IO);
                    ctx->hostaddress[i] =
                        0xff000000 | ((base << 16) & 0xff0000) | (j & 0xffff);
                    *expp = exp;
                    *basep = base;
                    if (vldbversion != VLDBVERSION_4) {
                        ctx->cheader->vital_header.vldbversion =
                            htonl(VLDBVERSION_4);
                        code = write_vital_vlheader(ctx);
                        if (code)
                            ERROR_EXIT(VL_IO);
                    }
                    ctx->cheader->IpMappedAddr[i] = htonl(ctx->hostaddress[i]);
                    code =
                        vlwrite(ctx->trans,
                                DOFFSET(0, ctx->cheader,
                                        &ctx->cheader->IpMappedAddr[i]),
                                (char *)&ctx->cheader->IpMappedAddr[i],
                                sizeof(afs_int32));
                    if (code)
                        ERROR_EXIT(VL_IO);
                    ERROR_EXIT(0);
                }
            }
        }
        ERROR_EXIT(VL_REPSFULL);        /* No reason to utilize a new error code */
    }

  error_exit:
    return error;
}

/* Allocate a free block of storage for entry, returning address of a new
   zeroed entry (or zero if something is wrong). */
afs_int32
AllocBlock(struct vl_ctx *ctx, struct nvlentry *tentry)
{
    afs_int32 blockindex;

    if (ctx->cheader->vital_header.freePtr) {
        /* allocate this dude */
        blockindex = ntohl(ctx->cheader->vital_header.freePtr);
        if (vlentryread(ctx->trans, blockindex, (char *)tentry, sizeof(vlentry)))
            return 0;
        ctx->cheader->vital_header.freePtr = htonl(tentry->nextIdHash[0]);
    } else {
        /* hosed, nothing on free list, grow file */
        blockindex = ntohl(ctx->cheader->vital_header.eofPtr);  /* remember this guy */
        ctx->cheader->vital_header.eofPtr = htonl(blockindex + sizeof(vlentry));
    }
    ctx->cheader->vital_header.allocs++;
    if (write_vital_vlheader(ctx))
        return 0;
    memset(tentry, 0, sizeof(nvlentry));        /* zero new entry */
    return blockindex;
}


/* Free a block given its index.  It must already have been unthreaded.
   Returns zero for success or an error code on failure. */
int
FreeBlock(struct vl_ctx *ctx, afs_int32 blockindex)
{
    struct nvlentry tentry;

    /* check validity of blockindex just to be on the safe side */
    if (!index_OK(ctx, blockindex))
        return VL_BADINDEX;
    memset(&tentry, 0, sizeof(nvlentry));
    tentry.nextIdHash[0] = ctx->cheader->vital_header.freePtr;  /* already in network order */
    tentry.flags = htonl(VLFREE);
    ctx->cheader->vital_header.freePtr = htonl(blockindex);
    if (vlwrite(ctx->trans, blockindex, (char *)&tentry, sizeof(nvlentry)))
        return VL_IO;
    ctx->cheader->vital_header.frees++;
    if (write_vital_vlheader(ctx))
        return VL_IO;
    return 0;
}


/* Look for a block by volid and voltype (if not known, use -1, which searches
 * all 3 volid hash lists).  Note that the linked lists are read in first from
 * the database header.  If found, read the block's contents into the area
 * pointed to by tentry and return the block's index.  If not found, return 0.
 */
afs_int32
FindByID(struct vl_ctx *ctx, afs_uint32 volid, afs_int32 voltype,
         struct nvlentry *tentry, afs_int32 *error)
{
    afs_int32 typeindex, hashindex, blockindex;

    *error = 0;
    hashindex = IDHash(volid);
    if (voltype == -1) {
        /* Should we have one big hash table for volids as opposed to three separate ones? */
        for (typeindex = 0; typeindex < MAXTYPES; typeindex++) {
            for (blockindex = ntohl(ctx->cheader->VolidHash[typeindex][hashindex]);
                 blockindex != NULLO;
                 blockindex = tentry->nextIdHash[typeindex]) {
                if (vlentryread
                    (ctx->trans, blockindex, (char *)tentry, sizeof(nvlentry))) {
                    *error = VL_IO;
                    return 0;
                }
                if (volid == tentry->volumeId[typeindex])
                    return blockindex;
            }
        }
    } else {
        for (blockindex = ntohl(ctx->cheader->VolidHash[voltype][hashindex]);
             blockindex != NULLO; blockindex = tentry->nextIdHash[voltype]) {
            if (vlentryread
                (ctx->trans, blockindex, (char *)tentry, sizeof(nvlentry))) {
                *error = VL_IO;
                return 0;
            }
            if (volid == tentry->volumeId[voltype])
                return blockindex;
        }
    }
    return 0;                   /* no such entry */
}

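/*
 * Illustrative sketch (not part of the original source): a caller that holds
 * a vl_ctx might look up an RW volume entry like this (the volume id and
 * variable names here are hypothetical):
 *
 *     struct nvlentry entry;
 *     afs_int32 code, blockindex;
 *
 *     blockindex = FindByID(ctx, 536870915, RWVOL, &entry, &code);
 *
 * A return of 0 with code == 0 means "no such entry"; a return of 0 with
 * code set to VL_IO means a read error occurred.
 */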

/* Look for a block by volume name. If found read the block's contents into
 * the area pointed to by tentry and return the block's index. If not
 * found return 0.
 */
afs_int32
FindByName(struct vl_ctx *ctx, char *volname, struct nvlentry *tentry,
           afs_int32 *error)
{
    afs_int32 hashindex;
    afs_int32 blockindex;
    char tname[VL_MAXNAMELEN];

    /* remove .backup or .readonly extensions for stupid backwards
     * compatibility
     */
    hashindex = strlen(volname);        /* really string length */
    if (hashindex >= 8 && strcmp(volname + hashindex - 7, ".backup") == 0) {
        /* this is a backup volume */
        strcpy(tname, volname);
        tname[hashindex - 7] = 0;       /* zap extension */
    } else if (hashindex >= 10
               && strcmp(volname + hashindex - 9, ".readonly") == 0) {
        /* this is a readonly volume */
        strcpy(tname, volname);
        tname[hashindex - 9] = 0;       /* zap extension */
    } else
        strcpy(tname, volname);

    *error = 0;
    hashindex = NameHash(tname);
    for (blockindex = ntohl(ctx->cheader->VolnameHash[hashindex]);
         blockindex != NULLO; blockindex = tentry->nextNameHash) {
        if (vlentryread(ctx->trans, blockindex, (char *)tentry, sizeof(nvlentry))) {
            *error = VL_IO;
            return 0;
        }
        if (!strcmp(tname, tentry->name))
            return blockindex;
    }
    return 0;                   /* no such entry */
}

/**
 * Returns whether or not any of the supplied volume IDs already exist
 * in the vldb.
 *
 * @param ctx      transaction context
 * @param ids      an array of volume IDs
 * @param ids_len  the number of elements in the 'ids' array
 * @param error    filled in with an error code in case of error
 *
 * @return whether any of the volume IDs are already used
 *  @retval 1  at least one of the volume IDs is already used
 *  @retval 0  none of the volume IDs are used, or an error occurred
 */
int
EntryIDExists(struct vl_ctx *ctx, const afs_uint32 *ids,
              afs_int32 ids_len, afs_int32 *error)
{
    afs_int32 typeindex;
    struct nvlentry tentry;

    *error = 0;

    for (typeindex = 0; typeindex < ids_len; typeindex++) {
        if (ids[typeindex]
            && FindByID(ctx, ids[typeindex], -1, &tentry, error)) {
            return 1;
        } else if (*error) {
            return 0;
        }
    }

    return 0;
}

/**
 * Finds the next range of unused volume IDs in the vldb.
 *
 * @param ctx       transaction context
 * @param maxvolid  the current max vol ID, and where to start looking
 *                  for an unused volume ID range
 * @param bump      how many volume IDs we need to be unused
 * @param error     filled in with an error code in case of error
 *
 * @return the next volume ID 'volid' such that the range
 *         [volid, volid+bump) of volume IDs is unused, or 0 if there's
 *         an error
 */
afs_uint32
NextUnusedID(struct vl_ctx *ctx, afs_uint32 maxvolid, afs_uint32 bump,
             afs_int32 *error)
{
    struct nvlentry tentry;
    afs_uint32 id;
    afs_uint32 nfree;

    *error = 0;

    /* we simply start at the given maxvolid, keep a running tally of
     * how many free volume IDs we've seen in a row, and return when
     * we've seen 'bump' unused IDs in a row */
    for (id = maxvolid, nfree = 0; nfree < bump; ++id) {
        if (FindByID(ctx, id, -1, &tentry, error)) {
            nfree = 0;
        } else if (*error) {
            return 0;
        } else {
            ++nfree;
        }
    }

    /* 'id' is now at the end of the [maxvolid,maxvolid+bump) range,
     * but we need to return the first unused id, so subtract the
     * number of current running free IDs to get the beginning */
    return id - nfree;
}

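/*
 * Descriptive comment (added; not in the original source): debugging aid that
 * walks the volume-name hash chain for 'hashindex' and logs each entry on it.
 */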
int
HashNDump(struct vl_ctx *ctx, int hashindex)
{
    int i = 0;
    int blockindex;
    struct nvlentry tentry;

    for (blockindex = ntohl(ctx->cheader->VolnameHash[hashindex]);
         blockindex != NULLO; blockindex = tentry.nextNameHash) {
        if (vlentryread(ctx->trans, blockindex, (char *)&tentry, sizeof(nvlentry)))
            return 0;
        i++;
        VLog(0,
             ("[%d]#%d: %10d %d %d (%s)\n", hashindex, i, tentry.volumeId[0],
              tentry.nextIdHash[0], tentry.nextNameHash, tentry.name));
    }
    return 0;
}


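/*
 * Descriptive comment (added; not in the original source): debugging aid that
 * walks the RW volume-id hash chain for 'hashindex' and logs each entry on it.
 */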
int
HashIdDump(struct vl_ctx *ctx, int hashindex)
{
    int i = 0;
    int blockindex;
    struct nvlentry tentry;

    for (blockindex = ntohl(ctx->cheader->VolidHash[0][hashindex]);
         blockindex != NULLO; blockindex = tentry.nextIdHash[0]) {
        if (vlentryread(ctx->trans, blockindex, (char *)&tentry, sizeof(nvlentry)))
            return 0;
        i++;
        VLog(0,
             ("[%d]#%d: %10d %d %d (%s)\n", hashindex, i, tentry.volumeId[0],
              tentry.nextIdHash[0], tentry.nextNameHash, tentry.name));
    }
    return 0;
}


/* Add a block to the hash table given a pointer to the block and its index.
 * The block is threaded onto both hash tables and written to disk. The
 * routine returns zero if there were no errors.
 */
int
ThreadVLentry(struct vl_ctx *ctx, afs_int32 blockindex,
              struct nvlentry *tentry)
{
    int errorcode;

    if (!index_OK(ctx, blockindex))
        return VL_BADINDEX;
    /* Insert into volid's hash linked list */
    if ((errorcode = HashVolid(ctx, RWVOL, blockindex, tentry)))
        return errorcode;

    /* For rw entries we also enter the RO and BACK volume ids (if they
     * exist) in the hash tables; note that all three volids (RW, RO, BACK)
     * should not be hashed yet! */
    if (tentry->volumeId[ROVOL]) {
        if ((errorcode = HashVolid(ctx, ROVOL, blockindex, tentry)))
            return errorcode;
    }
    if (tentry->volumeId[BACKVOL]) {
        if ((errorcode = HashVolid(ctx, BACKVOL, blockindex, tentry)))
            return errorcode;
    }

    /* Insert into volname's hash linked list */
    HashVolname(ctx, blockindex, tentry);

    /* Update cheader entry */
    if (write_vital_vlheader(ctx))
        return VL_IO;

    /* Update hash list pointers in the entry itself */
    if (vlentrywrite(ctx->trans, blockindex, (char *)tentry, sizeof(nvlentry)))
        return VL_IO;
    return 0;
}


/* Remove a block from both hash tables. If successful return 0, else
 * return an error code. */
int
UnthreadVLentry(struct vl_ctx *ctx, afs_int32 blockindex,
                struct nvlentry *aentry)
{
    afs_int32 errorcode, typeindex;

    if (!index_OK(ctx, blockindex))
        return VL_BADINDEX;
    if ((errorcode = UnhashVolid(ctx, RWVOL, blockindex, aentry)))
        return errorcode;

    /* Take the RO and BACK entries off their respective hash linked lists. */
    for (typeindex = ROVOL; typeindex <= BACKVOL; typeindex++) {
        if ((errorcode = UnhashVolid(ctx, typeindex, blockindex, aentry)))
            return errorcode;
    }

    /* Take it out of the Volname hash list */
    if ((errorcode = UnhashVolname(ctx, blockindex, aentry)))
        return errorcode;

    /* Update cheader entry */
    write_vital_vlheader(ctx);

    return 0;
}

/* cheader must have been read before this routine is called. */
int
HashVolid(struct vl_ctx *ctx, afs_int32 voltype, afs_int32 blockindex,
          struct nvlentry *tentry)
{
    afs_int32 hashindex, errorcode;
    struct nvlentry ventry;

    if (FindByID
        (ctx, tentry->volumeId[voltype], voltype, &ventry, &errorcode))
        return VL_IDALREADYHASHED;
    else if (errorcode)
        return errorcode;
    hashindex = IDHash(tentry->volumeId[voltype]);
    tentry->nextIdHash[voltype] =
        ntohl(ctx->cheader->VolidHash[voltype][hashindex]);
    ctx->cheader->VolidHash[voltype][hashindex] = htonl(blockindex);
    if (vlwrite
        (ctx->trans,
         DOFFSET(0, ctx->cheader, &ctx->cheader->VolidHash[voltype][hashindex]),
         (char *)&ctx->cheader->VolidHash[voltype][hashindex], sizeof(afs_int32)))
        return VL_IO;
    return 0;
}


/* cheader must have been read before this routine is called. */
int
UnhashVolid(struct vl_ctx *ctx, afs_int32 voltype, afs_int32 blockindex,
            struct nvlentry *aentry)
{
    int hashindex, nextblockindex, prevblockindex;
    struct nvlentry tentry;
    afs_int32 code;
    afs_int32 temp;

    if (aentry->volumeId[voltype] == NULLO)     /* Assume no volume id */
        return 0;
    /* Take it out of the VolId[voltype] hash list */
    hashindex = IDHash(aentry->volumeId[voltype]);
    nextblockindex = ntohl(ctx->cheader->VolidHash[voltype][hashindex]);
    if (nextblockindex == blockindex) {
        /* First on the hash list; just adjust pointers */
        ctx->cheader->VolidHash[voltype][hashindex] =
            htonl(aentry->nextIdHash[voltype]);
        code =
            vlwrite(ctx->trans,
                    DOFFSET(0, ctx->cheader,
                            &ctx->cheader->VolidHash[voltype][hashindex]),
                    (char *)&ctx->cheader->VolidHash[voltype][hashindex],
                    sizeof(afs_int32));
        if (code)
            return VL_IO;
    } else {
        while (nextblockindex != blockindex) {
            prevblockindex = nextblockindex;    /* always done at least once */
            if (vlentryread
                (ctx->trans, nextblockindex, (char *)&tentry, sizeof(nvlentry)))
                return VL_IO;
            if ((nextblockindex = tentry.nextIdHash[voltype]) == NULLO)
                return VL_NOENT;
        }
        temp = tentry.nextIdHash[voltype] = aentry->nextIdHash[voltype];
        temp = htonl(temp);     /* convert to network byte order before writing */
        if (vlwrite
            (ctx->trans,
             DOFFSET(prevblockindex, &tentry, &tentry.nextIdHash[voltype]),
             (char *)&temp, sizeof(afs_int32)))
            return VL_IO;
    }
    aentry->nextIdHash[voltype] = 0;
    return 0;
}


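/*
 * Descriptive comment (added; not in the original source): thread
 * 'blockindex' onto the head of the volume-name hash chain for aentry->name
 * and write the updated chain head back to the header on disk.
 */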
int
HashVolname(struct vl_ctx *ctx, afs_int32 blockindex,
            struct nvlentry *aentry)
{
    afs_int32 hashindex;
    afs_int32 code;

    /* Insert into volname's hash linked list */
    hashindex = NameHash(aentry->name);
    aentry->nextNameHash = ntohl(ctx->cheader->VolnameHash[hashindex]);
    ctx->cheader->VolnameHash[hashindex] = htonl(blockindex);
    code =
        vlwrite(ctx->trans,
                DOFFSET(0, ctx->cheader, &ctx->cheader->VolnameHash[hashindex]),
                (char *)&ctx->cheader->VolnameHash[hashindex], sizeof(afs_int32));
    if (code)
        return VL_IO;
    return 0;
}


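/*
 * Descriptive comment (added; not in the original source): remove
 * 'blockindex' from the volume-name hash chain for aentry->name, repairing
 * the previous entry's (or the chain head's) link on disk.
 */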
int
UnhashVolname(struct vl_ctx *ctx, afs_int32 blockindex,
              struct nvlentry *aentry)
{
    afs_int32 hashindex, nextblockindex, prevblockindex;
    struct nvlentry tentry;
    afs_int32 temp;

    /* Take it out of the Volname hash list */
    hashindex = NameHash(aentry->name);
    nextblockindex = ntohl(ctx->cheader->VolnameHash[hashindex]);
    if (nextblockindex == blockindex) {
        /* First on the hash list; just adjust pointers */
        ctx->cheader->VolnameHash[hashindex] = htonl(aentry->nextNameHash);
        if (vlwrite
            (ctx->trans,
             DOFFSET(0, ctx->cheader, &ctx->cheader->VolnameHash[hashindex]),
             (char *)&ctx->cheader->VolnameHash[hashindex], sizeof(afs_int32)))
            return VL_IO;
    } else {
        while (nextblockindex != blockindex) {
            prevblockindex = nextblockindex;    /* always done at least once */
            if (vlentryread
                (ctx->trans, nextblockindex, (char *)&tentry, sizeof(nvlentry)))
                return VL_IO;
            if ((nextblockindex = tentry.nextNameHash) == NULLO)
                return VL_NOENT;
        }
        tentry.nextNameHash = aentry->nextNameHash;
        temp = htonl(tentry.nextNameHash);
        if (vlwrite
            (ctx->trans, DOFFSET(prevblockindex, &tentry, &tentry.nextNameHash),
             (char *)&temp, sizeof(afs_int32)))
            return VL_IO;
    }
    aentry->nextNameHash = 0;
    return 0;
}


/* Return the next used vldb entry after blockindex (pass 0 to get the first
 * one), reading its contents into tentry. *remaining is set to the estimated
 * number of entries left after it; the routine returns the index of the
 * entry found, or 0 when there are no more (on error, *remaining is set
 * to -1).
 */

afs_int32
NextEntry(struct vl_ctx *ctx, afs_int32 blockindex,
          struct nvlentry *tentry, afs_int32 *remaining)
{
    afs_int32 lastblockindex;

    if (blockindex == 0)        /* get first one */
        blockindex = sizeof(*ctx->cheader);
    else {
        if (!index_OK(ctx, blockindex)) {
            *remaining = -1;    /* error */
            return 0;
        }
        blockindex += sizeof(nvlentry);
    }
    /* now search for the first entry that isn't free */
    for (lastblockindex = ntohl(ctx->cheader->vital_header.eofPtr);
         blockindex < lastblockindex;) {
        if (vlentryread(ctx->trans, blockindex, (char *)tentry, sizeof(nvlentry))) {
            *remaining = -1;
            return 0;
        }
        if (tentry->flags == VLCONTBLOCK) {
            /*
             * This is a special mh extension block; just skip over it.
             */
            blockindex += VL_ADDREXTBLK_SIZE;
        } else {
            if (tentry->flags != VLFREE) {
                /* estimate remaining number of entries, not including this one */
                *remaining =
                    (lastblockindex - blockindex) / sizeof(nvlentry) - 1;
                return blockindex;
            }
            blockindex += sizeof(nvlentry);
        }
    }
    *remaining = 0;             /* no more entries */
    return 0;
}


/* Routine to verify that index is a legal offset to a vldb entry in the
 * table
 */
static int
index_OK(struct vl_ctx *ctx, afs_int32 blockindex)
{
    if ((blockindex < sizeof(*ctx->cheader))
        || (blockindex >= ntohl(ctx->cheader->vital_header.eofPtr)))
        return 0;
    return 1;
}

/* makes a deep copy of src_ex into dst_ex */
static int
vlexcpy(struct extentaddr **dst_ex, struct extentaddr **src_ex)
{
    int i;
    for (i = 0; i < VL_MAX_ADDREXTBLKS; i++) {
        if (src_ex[i]) {
            if (!dst_ex[i]) {
                dst_ex[i] = malloc(VL_ADDREXTBLK_SIZE);
            }
            if (!dst_ex[i]) {
                return VL_NOMEM;
            }
            memcpy(dst_ex[i], src_ex[i], VL_ADDREXTBLK_SIZE);

        } else if (dst_ex[i]) {
            /* we have no src, but we have a dst... meaning, this block
             * has gone away */
            free(dst_ex[i]);
            dst_ex[i] = NULL;
        }
    }
    return 0;
}

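/*
 * Descriptive comment (added; not in the original source): point the vl_ctx
 * caches at the appropriate global copies: the read cache for LOCKREAD
 * transactions, or a fresh working copy of the header, host addresses and
 * extent blocks for write transactions.
 */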
int
vlsetcache(struct vl_ctx *ctx, int locktype)
{
    if (locktype == LOCKREAD) {
        ctx->hostaddress = rd_HostAddress;
        ctx->ex_addr = rd_ex_addr;
        ctx->cheader = &rd_cheader;
        return 0;
    } else {
        memcpy(wr_HostAddress, rd_HostAddress, sizeof(wr_HostAddress));
        memcpy(&wr_cheader, &rd_cheader, sizeof(wr_cheader));

        ctx->hostaddress = wr_HostAddress;
        ctx->ex_addr = wr_ex_addr;
        ctx->cheader = &wr_cheader;

        return vlexcpy(wr_ex_addr, rd_ex_addr);
    }
}

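/*
 * Descriptive comment (added; not in the original source): copy the write
 * caches back over the read caches when the ubik write transaction ends (see
 * the note in UpdateCache above) so readers pick up the new state.
 */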
int
vlsynccache(void)
{
    memcpy(rd_HostAddress, wr_HostAddress, sizeof(rd_HostAddress));
    memcpy(&rd_cheader, &wr_cheader, sizeof(rd_cheader));
    return vlexcpy(rd_ex_addr, wr_ex_addr);
}