/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License. For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/* dump the database
 * Dump is made to a local file. Structures are dumped in network byte order
 * for transportability between hosts
 */

#include <afsconfig.h>
#include <afs/param.h>
#include <afs/stds.h>

#include <roken.h>

#include <afs/opr.h>

#ifdef AFS_PTHREAD_ENV
# include <opr/lock.h>
#endif

#include <ubik.h>
#include <afs/audit.h>

#include "database.h"
#include "budb.h"
#include "globals.h"
#include "error_macros.h"
#include "budb_errs.h"
#include "budb_internal.h"


/* dump ubik database - routines to scan the database and dump all
 * the information
 */

/* -----------------------
 * synchronization on pipe
 * -----------------------
 */

/* interlocking for database dump */

dumpSyncT dumpSync;
dumpSyncP dumpSyncPtr = &dumpSync;


/* canWrite
 * check if we should dump more of the database. Waits for the reader
 * to drain the information before allowing the writer to proceed.
 * exit:
 *      1 - ok to write
 */

afs_int32
canWrite(int fid)
{
#ifndef AFS_PTHREAD_ENV
    afs_int32 code = 0;
#endif
    extern dumpSyncP dumpSyncPtr;

    ObtainWriteLock(&dumpSyncPtr->ds_lock);

    /* let the pipe drain */
    while (dumpSyncPtr->ds_bytes > 0) {
        if (dumpSyncPtr->ds_readerStatus == DS_WAITING) {
            dumpSyncPtr->ds_readerStatus = 0;
#ifdef AFS_PTHREAD_ENV
            opr_cv_broadcast(&dumpSyncPtr->ds_readerStatus_cond);
#else
            code = LWP_SignalProcess(&dumpSyncPtr->ds_readerStatus);
            if (code)
                LogError(code, "canWrite: Signal delivery failed\n");
#endif
        }
        dumpSyncPtr->ds_writerStatus = DS_WAITING;
        ReleaseWriteLock(&dumpSyncPtr->ds_lock);
#ifdef AFS_PTHREAD_ENV
        MUTEX_ENTER(&dumpSyncPtr->ds_writerStatus_mutex);
        opr_cv_wait(&dumpSyncPtr->ds_writerStatus_cond, &dumpSyncPtr->ds_writerStatus_mutex);
        MUTEX_EXIT(&dumpSyncPtr->ds_writerStatus_mutex);
#else
        LWP_WaitProcess(&dumpSyncPtr->ds_writerStatus);
#endif
        ObtainWriteLock(&dumpSyncPtr->ds_lock);
    }
    return (1);
}


/* haveWritten
 * record the fact that nbytes have been written. Signal the reader
 * to proceed, and unlock.
 * exit:
 *      no return value
 */

void
haveWritten(afs_int32 nbytes)
{
#ifndef AFS_PTHREAD_ENV
    afs_int32 code = 0;
#endif
    extern dumpSyncP dumpSyncPtr;

    dumpSyncPtr->ds_bytes += nbytes;
    if (dumpSyncPtr->ds_readerStatus == DS_WAITING) {
        dumpSyncPtr->ds_readerStatus = 0;
#ifdef AFS_PTHREAD_ENV
        opr_cv_broadcast(&dumpSyncPtr->ds_readerStatus_cond);
#else
        code = LWP_SignalProcess(&dumpSyncPtr->ds_readerStatus);
        if (code)
            LogError(code, "haveWritten: Signal delivery failed\n");
#endif
    }
    ReleaseWriteLock(&dumpSyncPtr->ds_lock);
}

/* doneWriting
 * wait for the reader to drain all the information, and then set the
 * done flag.
 */

void
doneWriting(afs_int32 error)
{
#ifndef AFS_PTHREAD_ENV
    afs_int32 code = 0;
#endif

    /* wait for the reader */
    ObtainWriteLock(&dumpSyncPtr->ds_lock);
    while (dumpSyncPtr->ds_readerStatus != DS_WAITING) {
        LogDebug(4, "doneWriting: waiting for Reader\n");
        dumpSyncPtr->ds_writerStatus = DS_WAITING;
        ReleaseWriteLock(&dumpSyncPtr->ds_lock);
#ifdef AFS_PTHREAD_ENV
        MUTEX_ENTER(&dumpSyncPtr->ds_writerStatus_mutex);
        opr_cv_wait(&dumpSyncPtr->ds_writerStatus_cond, &dumpSyncPtr->ds_writerStatus_mutex);
        MUTEX_EXIT(&dumpSyncPtr->ds_writerStatus_mutex);
#else
        LWP_WaitProcess(&dumpSyncPtr->ds_writerStatus);
#endif
        ObtainWriteLock(&dumpSyncPtr->ds_lock);
    }

    LogDebug(4, "doneWriting: setting done\n");

    /* signal that we are done */
    if (error)
        dumpSyncPtr->ds_writerStatus = DS_DONE_ERROR;
    else
        dumpSyncPtr->ds_writerStatus = DS_DONE;
    dumpSyncPtr->ds_readerStatus = 0;
#ifdef AFS_PTHREAD_ENV
    opr_cv_broadcast(&dumpSyncPtr->ds_readerStatus_cond);
#else
    code = LWP_NoYieldSignal(&dumpSyncPtr->ds_readerStatus);
    if (code)
        LogError(code, "doneWriting: Signal delivery failed\n");
#endif
    ReleaseWriteLock(&dumpSyncPtr->ds_lock);
}
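
/* Writer-side usage sketch.  This is illustrative only: the real caller is
 * the savedb code path outside this file, and the names exampleWriter,
 * pipeFid, buf and nbytes are hypothetical.  Each chunk written to the dump
 * pipe is bracketed by canWrite() and haveWritten(); the writer finishes
 * with doneWriting(), which wakes the reader and records success or failure.
 */
#ifdef notdef
static afs_int32
exampleWriter(int pipeFid, char *buf, afs_int32 nbytes)
{
    afs_int32 code = 0;

    if (canWrite(pipeFid) <= 0)     /* wait for the reader to drain the pipe */
        code = BUDB_DUMPFAILED;
    else if (write(pipeFid, buf, nbytes) != nbytes)
        code = BUDB_DUMPFAILED;
    else
        haveWritten(nbytes);        /* credit ds_bytes and wake the reader */

    doneWriting(code);              /* mark the dump stream finished (or failed) */
    return code;
}
#endif /* notdef */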

/* notes:
 * ut - setup and pass down
 */

/* writeStructHeader
 * write header appropriate for requested structure type
 */

afs_int32
writeStructHeader(int fid, afs_int32 type)
{
    struct structDumpHeader hostDumpHeader, netDumpHeader;

    hostDumpHeader.type = type;
    hostDumpHeader.structversion = 1;


    switch (type) {
    case SD_DBHEADER:
        hostDumpHeader.size = sizeof(struct DbHeader);
        break;

    case SD_DUMP:
        hostDumpHeader.size = sizeof(struct budb_dumpEntry);
        break;

    case SD_TAPE:
        hostDumpHeader.size = sizeof(struct budb_tapeEntry);
        break;

    case SD_VOLUME:
        hostDumpHeader.size = sizeof(struct budb_volumeEntry);
        break;

    case SD_END:
        hostDumpHeader.size = 0;
        break;

    default:
        LogError(0, "writeStructHeader: invalid type %d\n", type);
        BUDB_EXIT(1);
    }

    structDumpHeader_hton(&hostDumpHeader, &netDumpHeader);

    if (canWrite(fid) <= 0)
        return (BUDB_DUMPFAILED);
    if (write(fid, &netDumpHeader, sizeof(netDumpHeader)) !=
        sizeof(netDumpHeader))
        return (BUDB_DUMPFAILED);
    haveWritten(sizeof(netDumpHeader));

    return (0);
}

/* writeTextHeader
 * write a header appropriate for the requested text block type
 */

afs_int32
writeTextHeader(int fid, afs_int32 type)
{
    struct structDumpHeader hostDumpHeader, netDumpHeader;

    hostDumpHeader.structversion = 1;

    switch (type) {
    case TB_DUMPSCHEDULE:
        hostDumpHeader.type = SD_TEXT_DUMPSCHEDULE;
        break;

    case TB_VOLUMESET:
        hostDumpHeader.type = SD_TEXT_VOLUMESET;
        break;

    case TB_TAPEHOSTS:
        hostDumpHeader.type = SD_TEXT_TAPEHOSTS;
        break;

    default:
        LogError(0, "writeTextHeader: invalid type %d\n", type);
        BUDB_EXIT(1);
    }

    hostDumpHeader.size = ntohl(db.h.textBlock[type].size);
    structDumpHeader_hton(&hostDumpHeader, &netDumpHeader);

    if (canWrite(fid) <= 0)
        return (BUDB_DUMPFAILED);

    if (write(fid, &netDumpHeader, sizeof(netDumpHeader)) !=
        sizeof(netDumpHeader))
        return (BUDB_DUMPFAILED);

    haveWritten(sizeof(netDumpHeader));

    return (0);
}
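
/* Reader-side sketch of the record framing produced by writeStructHeader()
 * and writeTextHeader().  Illustrative only: exampleReadRecords and fd are
 * hypothetical names, and the real consumers of the stream are not shown in
 * this file.  Each record is a structDumpHeader (in network byte order)
 * followed by hdr.size bytes of payload; a header of type SD_END with size 0
 * terminates the stream.
 */
#ifdef notdef
static afs_int32
exampleReadRecords(int fd)
{
    struct structDumpHeader hdr;
    char buf[1024];
    afs_int32 type, size, chunk;

    while (read(fd, &hdr, sizeof(hdr)) == sizeof(hdr)) {
        type = ntohl(hdr.type);
        size = ntohl(hdr.size);
        if (type == SD_END)
            return 0;           /* normal end of stream */

        /* Skip (or parse) the payload: structure payloads are still in
         * network/XDR order, text payloads are raw bytes. */
        while (size > 0) {
            chunk = (size > (afs_int32) sizeof(buf)) ? (afs_int32) sizeof(buf) : size;
            if (read(fd, buf, chunk) != chunk)
                return BUDB_DUMPFAILED;         /* truncated record */
            size -= chunk;
        }
    }
    return BUDB_DUMPFAILED;     /* stream ended without an SD_END record */
}
#endif /* notdef */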

afs_int32
writeDbHeader(int fid)
{
    struct DbHeader header;
    afs_int32 curtime;
    afs_int32 code = 0, tcode;

    extern struct memoryDB db;

    /* check the memory database header for integrity */
    if (db.h.version != db.h.checkVersion)
        ERROR(BUDB_DATABASEINCONSISTENT);

    curtime = time(0);

    /* copy selected fields. Source is in xdr format. */
    header.dbversion = db.h.version;
    header.created = htonl(curtime);
    strcpy(header.cell, "");
    header.lastDumpId = db.h.lastDumpId;
    header.lastInstanceId = db.h.lastInstanceId;
    header.lastTapeId = db.h.lastTapeId;

    tcode = writeStructHeader(fid, SD_DBHEADER);
    if (tcode)
        ERROR(tcode);

    if (canWrite(fid) <= 0)
        ERROR(BUDB_DUMPFAILED);

    if (write(fid, &header, sizeof(header)) != sizeof(header))
        ERROR(BUDB_DUMPFAILED);

    haveWritten(sizeof(header));

  error_exit:
    return (code);
}

/* writeDump
 * write out a dump entry structure
 */

afs_int32
writeDump(int fid, dbDumpP dumpPtr)
{
    struct budb_dumpEntry dumpEntry;
    afs_int32 code = 0, tcode;

    tcode = dumpToBudbDump(dumpPtr, &dumpEntry);
    if (tcode)
        ERROR(tcode);

    writeStructHeader(fid, SD_DUMP);

    if (canWrite(fid) <= 0)
        ERROR(BUDB_DUMPFAILED);

    if (write(fid, &dumpEntry, sizeof(dumpEntry)) != sizeof(dumpEntry))
        ERROR(BUDB_DUMPFAILED);
    haveWritten(sizeof(dumpEntry));

  error_exit:
    return (code);
}

afs_int32
writeTape(int fid, struct tape *tapePtr, afs_int32 dumpid)
{
    struct budb_tapeEntry tapeEntry;
    afs_int32 code = 0, tcode;

    tcode = writeStructHeader(fid, SD_TAPE);
    if (tcode)
        ERROR(tcode);

    tapeToBudbTape(tapePtr, &tapeEntry);

    tapeEntry.dump = htonl(dumpid);

    if (canWrite(fid) <= 0)
        ERROR(BUDB_DUMPFAILED);

    if (write(fid, &tapeEntry, sizeof(tapeEntry)) != sizeof(tapeEntry))
        ERROR(BUDB_DUMPFAILED);

    haveWritten(sizeof(tapeEntry));

  error_exit:
    return (code);
}

/* combines volFragment and volInfo */

afs_int32
writeVolume(struct ubik_trans *ut, int fid, struct volFragment *volFragmentPtr,
            struct volInfo *volInfoPtr, afs_int32 dumpid, char *tapeName)
{
    struct budb_volumeEntry budbVolume;
    afs_int32 code = 0;

    volsToBudbVol(volFragmentPtr, volInfoPtr, &budbVolume);

    budbVolume.dump = htonl(dumpid);
    strcpy(budbVolume.tape, tapeName);

    writeStructHeader(fid, SD_VOLUME);

    if (canWrite(fid) <= 0)
        ERROR(BUDB_DUMPFAILED);

    if (write(fid, &budbVolume, sizeof(budbVolume)) != sizeof(budbVolume))
        ERROR(BUDB_DUMPFAILED);

    haveWritten(sizeof(budbVolume));

  error_exit:
    return (code);
}

/* -------------------
 * handlers for the text blocks
 * -------------------
 */

/* checkLock
 * make sure a text lock is NOT held
 * exit:
 *      0 - not held
 *      n - error
 */

afs_int32
checkLock(afs_int32 textType)
{
    db_lockP lockPtr;

    if ((textType < 0) || (textType > TB_NUM - 1))
        return (BUDB_BADARGUMENT);

    lockPtr = &db.h.textLocks[textType];

    if (lockPtr->lockState != 0)
        return (BUDB_LOCKED);
    return (0);
}

/* checkText
 * check the integrity of the specified text type
 */

int
checkText(struct ubik_trans *ut, afs_int32 textType)
{
    struct textBlock *tbPtr;
    afs_int32 nBytes = 0;       /* accumulated actual size */
    afs_int32 size;
    struct block block;
    dbadr blockAddr;

    afs_int32 code = 0;

    tbPtr = &db.h.textBlock[textType];
    blockAddr = ntohl(tbPtr->textAddr);
    size = ntohl(tbPtr->size);

    while (blockAddr != 0) {
        /* read the block */
        code =
            cdbread(ut, text_BLOCK, blockAddr, (char *)&block, sizeof(block));
        if (code)
            ERROR(code);

        /* check its type */
        if (block.h.type != text_BLOCK)
            ERROR(BUDB_DATABASEINCONSISTENT);

        /* add up the size */
        nBytes += BLOCK_DATA_SIZE;

        blockAddr = ntohl(block.h.next);
    }

    /* ensure that we have at least the expected amount of text */
    if (nBytes < size)
        ERROR(BUDB_DATABASEINCONSISTENT);

  error_exit:
    return (code);
}
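
/* On-disk layout assumed by checkText() above and writeText() below: the text
 * block header (db.h.textBlock[textType]) holds the total byte count in
 * 'size' and the address of the first text_BLOCK in 'textAddr'; each block
 * carries up to BLOCK_DATA_SIZE data bytes and chains to the next through
 * block.h.next, so the chain must supply at least 'size' bytes in total.
 */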

/* writeText
 * entry:
 *      textType - type of text block, e.g. TB_DUMPSCHEDULE
 */

afs_int32
writeText(struct ubik_trans *ut, int fid, int textType)
{
    struct textBlock *tbPtr;
    afs_int32 textSize, writeSize;
    dbadr dbAddr;
    struct block block;
    afs_int32 code = 0;

    /* check lock is free */
    code = checkLock(textType);
    if (code)
        ERROR(code);

    /* ensure that this block has the correct type */
    code = checkText(ut, textType);
    if (code) {
        LogError(0, "writeText: text type %d damaged\n", textType);
        ERROR(code);
    }

    tbPtr = &db.h.textBlock[textType];
    textSize = ntohl(tbPtr->size);
    dbAddr = ntohl(tbPtr->textAddr);

    if (!dbAddr)
        goto error_exit;        /* Don't save anything if no blocks */

    writeTextHeader(fid, textType);

    while (dbAddr) {
        code = cdbread(ut, text_BLOCK, dbAddr, (char *)&block, sizeof(block));
        if (code)
            ERROR(code);

        writeSize = min(textSize, BLOCK_DATA_SIZE);
        if (!writeSize)
            break;

        if (canWrite(fid) <= 0)
            ERROR(BUDB_DUMPFAILED);

        if (write(fid, &block.a[0], writeSize) != writeSize)
            ERROR(BUDB_IO);

        haveWritten(writeSize);
        textSize -= writeSize;

        dbAddr = ntohl(block.h.next);
    }

  error_exit:
    return (code);
}

#define MAXAPPENDS 200

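/* writeDatabase
 * dump the entire database to the open file descriptor, using the routines
 * above. The resulting stream is ordered as follows:
 *      one SD_DBHEADER record;
 *      for each initial dump, and then each of its appended dumps in append
 *          order: an SD_DUMP record, followed for every tape of that dump by
 *          an SD_TAPE record and the SD_VOLUME records for the volume
 *          fragments on that tape;
 *      the text blocks (dump schedule, volume set, tape hosts), each as a
 *          text header plus raw text, when non-empty;
 *      a terminating SD_END record.
 */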
afs_int32
writeDatabase(struct ubik_trans *ut, int fid)
{
    dbadr dbAddr, dbAppAddr;
    struct dump diskDump, apDiskDump;
    dbadr tapeAddr;
    struct tape diskTape;
    dbadr volFragAddr;
    struct volFragment diskVolFragment;
    struct volInfo diskVolInfo;
    int length, hash;
    int old = 0;
    int entrySize;
    afs_int32 code = 0, tcode;
    afs_int32 appDumpAddrs[MAXAPPENDS], numaddrs, appcount, j;

    struct memoryHashTable *mht;

    LogDebug(4, "writeDatabase:\n");

    /* write out a header identifying this database etc */
    tcode = writeDbHeader(fid);
    if (tcode) {
        LogError(tcode, "writeDatabase: Can't write Header\n");
        ERROR(tcode);
    }

    /* write out the tree of dump structures */

    mht = ht_GetType(HT_dumpIden_FUNCTION, &entrySize);
    if (!mht) {
        LogError(tcode, "writeDatabase: Can't get dump type\n");
        ERROR(BUDB_BADARGUMENT);
    }

    for (old = 0; old <= 1; old++) {
        /*oldnew */
        /* only two states, old or not old */
        length = (old ? mht->oldLength : mht->length);
        if (!length)
            continue;

        for (hash = 0; hash < length; hash++) {
            /*hashBuckets */
            /* dump all the dumps in this hash bucket
             */
            for (dbAddr = ht_LookupBucket(ut, mht, hash, old); dbAddr; dbAddr = ntohl(diskDump.idHashChain)) {  /*initialDumps */
                /* now check if this dump had any errors/inconsistencies.
                 * If so, don't dump it
                 */
                if (badEntry(dbAddr)) {
                    LogError(0,
                             "writeDatabase: Damaged dump entry at addr 0x%x\n",
                             dbAddr);
                    Log(" Skipping remainder of dumps on hash chain %d\n",
                        hash);
                    break;
                }

                tcode =
                    cdbread(ut, dump_BLOCK, dbAddr, &diskDump,
                            sizeof(diskDump));
                if (tcode) {
                    LogError(tcode,
                             "writeDatabase: Can't read dump entry (addr 0x%x)\n",
                             dbAddr);
                    Log(" Skipping remainder of dumps on hash chain %d\n",
                        hash);
                    break;
                }

                /* Skip appended dumps, only start with initial dumps */
                if (diskDump.initialDumpID != 0)
                    continue;

                /* This is an initial dump. Now follow its appended dump
                 * chain so the appended dumps are written in order for
                 * restore.
                 */
                appcount = numaddrs = 0;
                for (dbAppAddr = dbAddr; dbAppAddr;
                     dbAppAddr = ntohl(apDiskDump.appendedDumpChain)) {
                    /*appendedDumps */
                    /* Check to see if we have a circular loop of appended dumps */
                    for (j = 0; j < numaddrs; j++) {
                        if (appDumpAddrs[j] == dbAppAddr)
                            break;      /* circular loop */
                    }
                    if (j < numaddrs) {    /* circular loop */
                        Log("writeDatabase: Circular loop found in appended dumps\n");
                        Log("Skipping rest of appended dumps of dumpID %u\n",
                            ntohl(diskDump.id));
                        break;
                    }
                    if (numaddrs >= MAXAPPENDS)
                        numaddrs = MAXAPPENDS - 1;      /* don't overflow */
                    appDumpAddrs[numaddrs] = dbAppAddr;
                    numaddrs++;

                    /* If we've dumped 1000 appended dumps, assume a loop */
                    if (appcount >= 5 * MAXAPPENDS) {
                        Log("writeDatabase: Potential circular loop of appended dumps\n");
                        Log("Skipping rest of appended dumps of dumpID %u. Dumped %d\n", ntohl(diskDump.id), appcount);
                        break;
                    }
                    appcount++;

                    /* Read the dump entry */
                    if (dbAddr == dbAppAddr) {
                        /* First time through, don't need to read the dump entry again */
                        memcpy(&apDiskDump, &diskDump, sizeof(diskDump));
                    } else {
                        if (badEntry(dbAppAddr)) {
                            LogError(0,
                                     "writeDatabase: Damaged appended dump entry at addr 0x%x\n",
                                     dbAppAddr);
                            Log(" Skipping this and remainder of appended dumps of initial DumpID %u\n", ntohl(diskDump.id));
                            break;
                        }

                        tcode =
                            cdbread(ut, dump_BLOCK, dbAppAddr, &apDiskDump,
                                    sizeof(apDiskDump));
                        if (tcode) {
                            LogError(tcode,
                                     "writeDatabase: Can't read appended dump entry (addr 0x%x)\n",
                                     dbAppAddr);
                            Log(" Skipping this and remainder of appended dumps of initial DumpID %u\n", ntohl(diskDump.id));
                            break;
                        }

                        /* Verify that this appended dump points to the initial dump */
                        if (ntohl(apDiskDump.initialDumpID) !=
                            ntohl(diskDump.id)) {
                            LogError(0,
                                     "writeDatabase: Appended dumpID %u does not reference initial dumpID %u\n",
                                     ntohl(apDiskDump.id),
                                     ntohl(diskDump.id));
                            Log(" Skipping this appended dump\n");
                            continue;
                        }
                    }

                    /* Save the dump entry */
                    tcode = writeDump(fid, &apDiskDump);
                    if (tcode) {
                        LogError(tcode,
                                 "writeDatabase: Can't write dump entry\n");
                        ERROR(tcode);
                    }

                    /* For each tape on this dump
                     */
                    for (tapeAddr = ntohl(apDiskDump.firstTape); tapeAddr; tapeAddr = ntohl(diskTape.nextTape)) {       /*tapes */
                        /* read the tape entry */
                        tcode =
                            cdbread(ut, tape_BLOCK, tapeAddr, &diskTape,
                                    sizeof(diskTape));
                        if (tcode) {
                            LogError(tcode,
                                     "writeDatabase: Can't read tape entry (addr 0x%x) of dumpID %u\n",
                                     tapeAddr, ntohl(apDiskDump.id));
                            Log(" Skipping this and remaining tapes in the dump (and all their volumes)\n");
                            break;
                        }

                        /* Save the tape entry */
                        tcode =
                            writeTape(fid, &diskTape, ntohl(apDiskDump.id));
                        if (tcode) {
                            LogError(tcode,
                                     "writeDatabase: Can't write tape entry\n");
                            ERROR(tcode);
                        }

                        /* For each volume on this tape.
                         */
                        for (volFragAddr = ntohl(diskTape.firstVol); volFragAddr; volFragAddr = ntohl(diskVolFragment.sameTapeChain)) {  /*volumes */
                            /* Read the volume Fragment entry */
                            tcode =
                                cdbread(ut, volFragment_BLOCK, volFragAddr,
                                        &diskVolFragment,
                                        sizeof(diskVolFragment));
                            if (tcode) {
                                LogError(tcode,
                                         "writeDatabase: Can't read volfrag entry (addr 0x%x) of dumpID %u\n",
                                         volFragAddr, ntohl(apDiskDump.id));
                                Log(" Skipping this and remaining volumes on tape '%s'\n", diskTape.name);
                                break;
                            }

                            /* Read the volume Info entry */
                            tcode =
                                cdbread(ut, volInfo_BLOCK,
                                        ntohl(diskVolFragment.vol),
                                        &diskVolInfo, sizeof(diskVolInfo));
                            if (tcode) {
                                LogError(tcode,
                                         "writeDatabase: Can't read volinfo entry (addr 0x%x) of dumpID %u\n",
                                         ntohl(diskVolFragment.vol),
                                         ntohl(apDiskDump.id));
                                Log(" Skipping volume on tape '%s'\n",
                                    diskTape.name);
                                continue;
                            }

                            /* Save the volume entry */
                            tcode =
                                writeVolume(ut, fid, &diskVolFragment,
                                            &diskVolInfo,
                                            ntohl(apDiskDump.id),
                                            diskTape.name);
                            if (tcode) {
                                LogError(tcode,
                                         "writeDatabase: Can't write volume entry\n");
                                ERROR(tcode);
                            }
                        }       /*volumes */
                    }           /*tapes */
                }               /*appendedDumps */
            }                   /*initialDumps */
        }                       /*hashBuckets */
    }                           /*oldnew */

    /* write out the textual configuration information */
    tcode = writeText(ut, fid, TB_DUMPSCHEDULE);
    if (tcode) {
        LogError(tcode, "writeDatabase: Can't write dump schedule\n");
        ERROR(tcode);
    }
    tcode = writeText(ut, fid, TB_VOLUMESET);
    if (tcode) {
        LogError(tcode, "writeDatabase: Can't write volume set\n");
        ERROR(tcode);
    }
    tcode = writeText(ut, fid, TB_TAPEHOSTS);
    if (tcode) {
        LogError(tcode, "writeDatabase: Can't write tape hosts\n");
        ERROR(tcode);
    }

    tcode = writeStructHeader(fid, SD_END);
    if (tcode) {
        LogError(tcode, "writeDatabase: Can't write end savedb\n");
        ERROR(tcode);
    }

  error_exit:
    doneWriting(code);
    return (code);
}


#ifdef notdef

afs_int32
canWrite(int fid)
{
    afs_int32 in, out, except;
    struct timeval tp;
    afs_int32 code;

    tp.tv_sec = 0;
    tp.tv_usec = 0;

    out = (1 << fid);
    in = 0;
    except = 0;

    code = IOMGR_Select(32, &in, &out, &except, &tp);
    return (code);
}

#endif /* notdef */