2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
9 * Portions Copyright (c) 2005-2008 Sine Nomine Associates
15 Institution: The Information Technology Center, Carnegie-Mellon University
18 #include <afsconfig.h>
19 #include <afs/param.h>
25 #ifdef HAVE_SYS_FILE_H
30 #ifdef AFS_PTHREAD_ENV
33 #include <opr/jhash.h>
34 #include "rx/rx_queue.h"
35 #include <afs/afsint.h>
37 #include <afs/errors.h>
40 #include <afs/afssyscalls.h>
44 #include "volume_inline.h"
45 #include "vnode_inline.h"
46 #include "partition.h"
53 struct VnodeClassInfo VnodeClassInfo
[nVNODECLASSES
];
55 void VNLog(afs_int32 aop
, afs_int32 anparms
, ... );
57 /* logging stuff for finding bugs */
58 #define THELOGSIZE 5120
59 static afs_int32 theLog
[THELOGSIZE
];
60 static afs_int32 vnLogPtr
= 0;
62 VNLog(afs_int32 aop
, afs_int32 anparms
, ... )
67 va_start(ap
, anparms
);
70 anparms
= 4; /* do bounds checking */
72 temp
= (aop
<< 16) | anparms
;
73 theLog
[vnLogPtr
++] = temp
;
74 if (vnLogPtr
>= THELOGSIZE
)
76 for (temp
= 0; temp
< anparms
; temp
++) {
77 theLog
[vnLogPtr
++] = va_arg(ap
, afs_int32
);
78 if (vnLogPtr
>= THELOGSIZE
)
85 /* Vnode hash table. Just use the Jenkins hash of the vnode number,
86 * with the volume ID as an initval because it's there. (That will
87 * make the same vnode number in different volumes hash to a different
88 * value, which would probably not even be a big deal anyway.)
91 #define VNODE_HASH_TABLE_BITS 11
92 #define VNODE_HASH_TABLE_SIZE opr_jhash_size(VNODE_HASH_TABLE_BITS)
93 #define VNODE_HASH_TABLE_MASK opr_jhash_mask(VNODE_HASH_TABLE_BITS)
94 private Vnode
*VnodeHashTable
[VNODE_HASH_TABLE_SIZE
];
95 #define VNODE_HASH(volumeptr,vnodenumber)\
96 (opr_jhash_int((vnodenumber), V_id((volumeptr))) & VNODE_HASH_TABLE_MASK)
100 #define BAD_IGET -1000
102 /* There are two separate vnode queue types defined here:
103 * Each hash conflict chain -- is singly linked, with a single head
104 * pointer. New entries are added at the beginning. Old
105 * entries are removed by linear search, which generally
106 * only occurs after a disk read).
107 * LRU chain -- is doubly linked, single head pointer.
108 * Entries are added at the head, reclaimed from the tail,
109 * or removed from anywhere in the queue.
113 * add a vnode to the volume's vnode list.
115 * @param[in] vp volume object pointer
116 * @param[in] vnp vnode object pointer
118 * @note for DAFS, it may seem like we should be acquiring a lightweight ref
119 * on vp, but this would actually break things. Right now, this is ok
120 * because we destroy all vnode cache contents during during volume
125 * @internal volume package internal use only
128 AddToVVnList(Volume
* vp
, Vnode
* vnp
)
130 if (queue_IsOnQueue(vnp
))
134 Vn_cacheCheck(vnp
) = vp
->cacheCheck
;
135 queue_Append(&vp
->vnode_list
, vnp
);
136 Vn_stateFlags(vnp
) |= VN_ON_VVN
;
140 * delete a vnode from the volume's vnode list.
144 * @internal volume package internal use only
147 DeleteFromVVnList(Vnode
* vnp
)
149 Vn_volume(vnp
) = NULL
;
151 if (!queue_IsOnQueue(vnp
))
155 Vn_stateFlags(vnp
) &= ~(VN_ON_VVN
);
159 * add a vnode to the end of the lru.
161 * @param[in] vcp vnode class info object pointer
162 * @param[in] vnp vnode object pointer
164 * @internal vnode package internal use only
167 AddToVnLRU(struct VnodeClassInfo
* vcp
, Vnode
* vnp
)
169 if (Vn_stateFlags(vnp
) & VN_ON_LRU
) {
173 /* Add it to the circular LRU list */
174 if (vcp
->lruHead
== NULL
)
175 Abort("VPutVnode: vcp->lruHead==NULL");
177 vnp
->lruNext
= vcp
->lruHead
;
178 vnp
->lruPrev
= vcp
->lruHead
->lruPrev
;
179 vcp
->lruHead
->lruPrev
= vnp
;
180 vnp
->lruPrev
->lruNext
= vnp
;
184 /* If the vnode was just deleted, put it at the end of the chain so it
185 * will be reused immediately */
187 vcp
->lruHead
= vnp
->lruNext
;
189 Vn_stateFlags(vnp
) |= VN_ON_LRU
;
193 * delete a vnode from the lru.
195 * @param[in] vcp vnode class info object pointer
196 * @param[in] vnp vnode object pointer
198 * @internal vnode package internal use only
201 DeleteFromVnLRU(struct VnodeClassInfo
* vcp
, Vnode
* vnp
)
203 if (!(Vn_stateFlags(vnp
) & VN_ON_LRU
)) {
207 if (vnp
== vcp
->lruHead
)
208 vcp
->lruHead
= vcp
->lruHead
->lruNext
;
210 if ((vnp
== vcp
->lruHead
) ||
211 (vcp
->lruHead
== NULL
))
212 Abort("DeleteFromVnLRU: lru chain addled!\n");
214 vnp
->lruPrev
->lruNext
= vnp
->lruNext
;
215 vnp
->lruNext
->lruPrev
= vnp
->lruPrev
;
217 Vn_stateFlags(vnp
) &= ~(VN_ON_LRU
);
221 * add a vnode to the vnode hash table.
223 * @param[in] vnp vnode object pointer
227 * @post vnode on hash
229 * @internal vnode package internal use only
232 AddToVnHash(Vnode
* vnp
)
234 unsigned int newHash
;
236 if (!(Vn_stateFlags(vnp
) & VN_ON_HASH
)) {
237 newHash
= VNODE_HASH(Vn_volume(vnp
), Vn_id(vnp
));
238 vnp
->hashNext
= VnodeHashTable
[newHash
];
239 VnodeHashTable
[newHash
] = vnp
;
240 vnp
->hashIndex
= newHash
;
242 Vn_stateFlags(vnp
) |= VN_ON_HASH
;
247 * delete a vnode from the vnode hash table.
254 * @post vnode removed from hash
256 * @internal vnode package internal use only
259 DeleteFromVnHash(Vnode
* vnp
)
263 if (Vn_stateFlags(vnp
) & VN_ON_HASH
) {
264 tvnp
= VnodeHashTable
[vnp
->hashIndex
];
266 VnodeHashTable
[vnp
->hashIndex
] = vnp
->hashNext
;
268 while (tvnp
&& tvnp
->hashNext
!= vnp
)
269 tvnp
= tvnp
->hashNext
;
271 tvnp
->hashNext
= vnp
->hashNext
;
274 vnp
->hashNext
= NULL
;
276 Vn_stateFlags(vnp
) &= ~(VN_ON_HASH
);
282 * invalidate a vnode cache entry.
284 * @param[in] avnode vnode object pointer
288 * @post vnode metadata invalidated.
289 * vnode removed from hash table.
290 * DAFS: vnode state set to VN_STATE_INVALID.
292 * @internal vnode package internal use only
295 VInvalidateVnode_r(struct Vnode
*avnode
)
297 avnode
->changed_newTime
= 0; /* don't let it get flushed out again */
298 avnode
->changed_oldTime
= 0;
299 avnode
->delete = 0; /* it isn't deleted, really */
300 avnode
->cacheCheck
= 0; /* invalid: prevents future vnode searches from working */
301 DeleteFromVnHash(avnode
);
302 #ifdef AFS_DEMAND_ATTACH_FS
303 VnChangeState_r(avnode
, VN_STATE_INVALID
);
309 * initialize vnode cache for a given vnode class.
311 * @param[in] class vnode class
312 * @param[in] nVnodes size of cache
314 * @post vnode cache allocated and initialized
316 * @internal volume package internal use only
318 * @note generally called by VInitVolumePackage_r
320 * @see VInitVolumePackage_r
323 VInitVnodes(VnodeClass
class, int nVnodes
)
326 struct VnodeClassInfo
*vcp
= &VnodeClassInfo
[class];
328 vcp
->allocs
= vcp
->gets
= vcp
->reads
= vcp
->writes
= 0;
329 vcp
->cacheSize
= nVnodes
;
332 opr_Assert(CHECKSIZE_SMALLVNODE
);
334 vcp
->residentSize
= SIZEOF_SMALLVNODE
;
335 vcp
->diskSize
= SIZEOF_SMALLDISKVNODE
;
336 vcp
->magic
= SMALLVNODEMAGIC
;
340 vcp
->residentSize
= SIZEOF_LARGEVNODE
;
341 vcp
->diskSize
= SIZEOF_LARGEDISKVNODE
;
342 vcp
->magic
= LARGEVNODEMAGIC
;
346 int s
= vcp
->diskSize
- 1;
356 va
= (byte
*) calloc(nVnodes
, vcp
->residentSize
);
357 opr_Assert(va
!= NULL
);
359 Vnode
*vnp
= (Vnode
*) va
;
360 Vn_refcount(vnp
) = 0; /* no context switches */
361 Vn_stateFlags(vnp
) |= VN_ON_LRU
;
362 #ifdef AFS_DEMAND_ATTACH_FS
363 CV_INIT(&Vn_stateCV(vnp
), "vnode state", CV_DEFAULT
, 0);
364 Vn_state(vnp
) = VN_STATE_INVALID
;
366 #else /* !AFS_DEMAND_ATTACH_FS */
367 Lock_Init(&vnp
->lock
);
368 #endif /* !AFS_DEMAND_ATTACH_FS */
369 vnp
->changed_oldTime
= 0;
370 vnp
->changed_newTime
= 0;
371 Vn_volume(vnp
) = NULL
;
372 Vn_cacheCheck(vnp
) = 0;
373 vnp
->delete = Vn_id(vnp
) = 0;
374 #ifdef AFS_PTHREAD_ENV
375 vnp
->writer
= (pthread_t
) 0;
376 #else /* AFS_PTHREAD_ENV */
377 vnp
->writer
= (PROCESS
) 0;
378 #endif /* AFS_PTHREAD_ENV */
382 if (vcp
->lruHead
== NULL
)
383 vcp
->lruHead
= vnp
->lruNext
= vnp
->lruPrev
= vnp
;
385 vnp
->lruNext
= vcp
->lruHead
;
386 vnp
->lruPrev
= vcp
->lruHead
->lruPrev
;
387 vcp
->lruHead
->lruPrev
= vnp
;
388 vnp
->lruPrev
->lruNext
= vnp
;
391 va
+= vcp
->residentSize
;
398 * allocate an unused vnode from the lru chain.
400 * @param[in] vcp vnode class info object pointer
401 * @param[in] vp volume pointer
402 * @param[in] vnodeNumber new vnode number that the vnode will be used for
404 * @pre VOL_LOCK is held
406 * @post vnode object is removed from lru
407 * vnode is disassociated with its old volume, and associated with its
409 * vnode is removed from its old vnode hash table, and for DAFS, it is
410 * added to its new hash table
411 * state is set to VN_STATE_INVALID.
412 * inode handle is released.
413 * a reservation is held on the vnode object
415 * @note we traverse backwards along the lru circlist. It shouldn't
416 * be necessary to specify that nUsers == 0 since if it is in the list,
417 * nUsers should be 0. Things shouldn't be in lruq unless no one is
420 * @warning DAFS: VOL_LOCK is dropped while doing inode handle release
422 * @warning for non-DAFS, the vnode is _not_ hashed on the vnode hash table;
423 * non-DAFS must hash the vnode itself after loading data
425 * @return vnode object pointer
428 VGetFreeVnode_r(struct VnodeClassInfo
* vcp
, struct Volume
*vp
,
433 vnp
= vcp
->lruHead
->lruPrev
;
434 #ifdef AFS_DEMAND_ATTACH_FS
435 if (Vn_refcount(vnp
) != 0 || VnIsExclusiveState(Vn_state(vnp
)) ||
436 Vn_readers(vnp
) != 0)
437 Abort("VGetFreeVnode_r: in-use vnode in lruq");
439 if (Vn_refcount(vnp
) != 0 || CheckLock(&vnp
->lock
))
440 Abort("VGetFreeVnode_r: locked vnode in lruq");
442 VNLog(1, 2, Vn_id(vnp
), (intptr_t)vnp
, 0, 0);
445 * it's going to be overwritten soon enough.
446 * remove from LRU, delete hash entry, and
447 * disassociate from old parent volume before
448 * we have a chance to drop the vol glock
450 DeleteFromVnLRU(vcp
, vnp
);
451 DeleteFromVnHash(vnp
);
452 if (Vn_volume(vnp
)) {
453 DeleteFromVVnList(vnp
);
456 /* we must re-hash the vnp _before_ we drop the glock again; otherwise,
457 * someone else might try to grab the same vnode id, and we'll both alloc
458 * a vnode object for the same vn id, bypassing vnode locking */
459 Vn_id(vnp
) = vnodeNumber
;
460 VnCreateReservation_r(vnp
);
461 AddToVVnList(vp
, vnp
);
462 #ifdef AFS_DEMAND_ATTACH_FS
466 /* drop the file descriptor */
468 #ifdef AFS_DEMAND_ATTACH_FS
469 VnChangeState_r(vnp
, VN_STATE_RELEASING
);
472 /* release is, potentially, a highly latent operation due to a couple
474 * - ihandle package lock contention
475 * - closing file descriptor(s) associated with ih
477 * Hance, we perform outside of the volume package lock in order to
478 * reduce the probability of contention.
480 IH_RELEASE(vnp
->handle
);
481 #ifdef AFS_DEMAND_ATTACH_FS
486 #ifdef AFS_DEMAND_ATTACH_FS
487 VnChangeState_r(vnp
, VN_STATE_INVALID
);
495 * lookup a vnode in the vnode cache hash table.
497 * @param[in] vp pointer to volume object
498 * @param[in] vnodeId vnode id
502 * @post matching vnode object or NULL is returned
504 * @return vnode object pointer
505 * @retval NULL no matching vnode object was found in the cache
507 * @internal vnode package internal use only
509 * @note this symbol is exported strictly for fssync debug protocol use
512 VLookupVnode(Volume
* vp
, VnodeId vnodeId
)
515 unsigned int newHash
;
517 newHash
= VNODE_HASH(vp
, vnodeId
);
518 for (vnp
= VnodeHashTable
[newHash
];
520 ((Vn_id(vnp
) != vnodeId
) ||
521 (Vn_volume(vnp
) != vp
) ||
522 (vp
->cacheCheck
!= Vn_cacheCheck(vnp
))));
523 vnp
= vnp
->hashNext
);
530 VAllocVnode(Error
* ec
, Volume
* vp
, VnodeType type
, VnodeId in_vnode
, Unique in_unique
)
534 retVal
= VAllocVnode_r(ec
, vp
, type
, in_vnode
, in_unique
);
540 * allocate a new vnode.
542 * @param[out] ec error code return
543 * @param[in] vp volume object pointer
544 * @param[in] type desired vnode type
545 * @param[in] type desired vnode ID (optional)
546 * @param[in] type desired vnode Unique (optional)
548 * @return vnode object pointer
550 * @pre VOL_LOCK held;
551 * heavyweight ref held on vp
553 * @post vnode allocated and returned
556 VAllocVnode_r(Error
* ec
, Volume
* vp
, VnodeType type
, VnodeId in_vnode
, Unique in_unique
)
561 struct VnodeClassInfo
*vcp
;
564 struct vnodeIndex
*index
;
567 #ifdef AFS_DEMAND_ATTACH_FS
568 VolState vol_state_save
;
573 #ifdef AFS_DEMAND_ATTACH_FS
575 * once a volume has entered an error state, don't permit
576 * further operations to proceed
577 * -- tkeiser 11/21/2007
579 VWaitExclusiveState_r(vp
);
580 if (VIsErrorState(V_attachState(vp
))) {
581 /* XXX is VSALVAGING acceptable here? */
587 if (programType
== fileServer
&& !V_inUse(vp
)) {
588 if (vp
->specialStatus
) {
589 *ec
= vp
->specialStatus
;
595 class = vnodeTypeToClass(type
);
596 vcp
= &VnodeClassInfo
[class];
598 if (!VolumeWriteable(vp
)) {
599 *ec
= (bit32
) VREADONLY
;
603 if (vp
->nextVnodeUnique
> V_uniquifier(vp
)) {
604 VUpdateVolume_r(ec
, vp
, 0);
609 if (programType
== fileServer
) {
610 VAddToVolumeUpdateList_r(ec
, vp
);
616 * If in_vnode and in_unique are specified, we are asked to
617 * allocate a specifc vnode slot. Used by RW replication to
618 * keep vnode IDs consistent with the master.
624 unique
= vp
->nextVnodeUnique
++;
626 rollover
= 1; /* nextVnodeUnique rolled over */
627 vp
->nextVnodeUnique
= 2; /* 1 is reserved for the root vnode */
628 unique
= vp
->nextVnodeUnique
++;
631 if (vp
->nextVnodeUnique
> V_uniquifier(vp
) || rollover
) {
632 VUpdateVolume_r(ec
, vp
, 0);
637 /* Find a slot in the bit map */
638 bitNumber
= VAllocBitmapEntry_r(ec
, vp
, &vp
->vnodeIndex
[class],
639 VOL_ALLOC_BITMAP_WAIT
);
643 vnodeNumber
= bitNumberToVnodeNumber(bitNumber
, class);
645 index
= &vp
->vnodeIndex
[class];
650 /* Catch us up to where the master is */
651 if (in_unique
> vp
->nextVnodeUnique
)
652 vp
->nextVnodeUnique
= in_unique
+1;
654 if (vp
->nextVnodeUnique
> V_uniquifier(vp
)) {
655 VUpdateVolume_r(ec
, vp
, 0);
661 bitNumber
= vnodeIdToBitNumber(in_vnode
);
662 offset
= bitNumber
>> 3;
664 /* Mark vnode in use. Grow bitmap if needed. */
665 if ((offset
>= index
->bitmapSize
)
666 || ((*(index
->bitmap
+ offset
) & (1 << (bitNumber
& 0x7))) == 0))
668 /* Should not happen */
669 if (*(index
->bitmap
+ offset
) & (1 << (bitNumber
& 0x7))) {
674 *(index
->bitmap
+ offset
) |= (1 << (bitNumber
& 0x7));
675 vnodeNumber
= in_vnode
;
680 * at this point we should be assured that V_attachState(vp) is non-exclusive
684 VNLog(2, 1, vnodeNumber
, 0, 0, 0);
685 /* Prepare to move it to the new hash chain */
686 vnp
= VLookupVnode(vp
, vnodeNumber
);
688 /* slot already exists. May even not be in lruq (consider store file locking a file being deleted)
689 * so we may have to wait for it below */
690 VNLog(3, 2, vnodeNumber
, (intptr_t)vnp
, 0, 0);
692 VnCreateReservation_r(vnp
);
693 if (Vn_refcount(vnp
) == 1) {
694 /* we're the only user */
695 /* This won't block */
696 VnLock(vnp
, WRITE_LOCK
, VOL_LOCK_HELD
, WILL_NOT_DEADLOCK
);
698 #ifdef AFS_DEMAND_ATTACH_FS
701 * vnode was cached, wait for any existing exclusive ops to finish.
702 * once we have reacquired the lock, re-verify volume state.
704 * note: any vnode error state is related to the old vnode; disregard.
706 VnWaitQuiescent_r(vnp
);
707 if (VIsErrorState(V_attachState(vp
))) {
708 VnUnlock(vnp
, WRITE_LOCK
);
709 VnCancelReservation_r(vnp
);
715 /* other users present; follow locking hierarchy */
716 VnLock(vnp
, WRITE_LOCK
, VOL_LOCK_HELD
, MIGHT_DEADLOCK
);
719 * verify state of the world hasn't changed
721 * (technically, this should never happen because cachecheck
722 * is only updated during a volume attach, which should not
723 * happen when refs are held)
725 if (Vn_volume(vnp
)->cacheCheck
!= Vn_cacheCheck(vnp
)) {
726 VnUnlock(vnp
, WRITE_LOCK
);
727 VnCancelReservation_r(vnp
);
732 /* sanity check: vnode should be blank if it was deleted. If it's
733 * not blank, it is still in use somewhere; but the bitmap told us
734 * this vnode number was free, so something is wrong. */
735 if (vnp
->disk
.type
!= vNull
) {
737 Log("VAllocVnode: addled bitmap or vnode object! (vol %" AFS_VOLID_FMT
", "
738 "vnode %p, number %ld, type %ld)\n", afs_printable_VolumeId_lu(vp
->hashid
), vnp
,
739 (long)Vn_id(vnp
), (long)vnp
->disk
.type
);
741 VFreeBitMapEntry_r(&tmp
, vp
, &vp
->vnodeIndex
[class], bitNumber
,
742 VOL_FREE_BITMAP_WAIT
);
743 VInvalidateVnode_r(vnp
);
744 VnUnlock(vnp
, WRITE_LOCK
);
745 VnCancelReservation_r(vnp
);
746 #ifdef AFS_DEMAND_ATTACH_FS
747 VRequestSalvage_r(ec
, vp
, SALVSYNC_ERROR
, 0);
749 VForceOffline_r(vp
, 0);
755 /* no such vnode in the cache */
757 vnp
= VGetFreeVnode_r(vcp
, vp
, vnodeNumber
);
759 /* This will never block (guaranteed by check in VGetFreeVnode_r() */
760 VnLock(vnp
, WRITE_LOCK
, VOL_LOCK_HELD
, WILL_NOT_DEADLOCK
);
762 #ifdef AFS_DEMAND_ATTACH_FS
763 VnChangeState_r(vnp
, VN_STATE_ALLOC
);
766 /* Sanity check: is this vnode really not in use? */
769 IHandle_t
*ihP
= vp
->vnodeIndex
[class].handle
;
771 afs_foff_t off
= vnodeIndexOffset(vcp
, vnodeNumber
);
774 /* XXX we have a potential race here if two threads
775 * allocate new vnodes at the same time, and they
776 * both decide it's time to extend the index
779 #ifdef AFS_DEMAND_ATTACH_FS
781 * this race has been eliminated for the DAFS case
782 * using exclusive state VOL_STATE_VNODE_ALLOC
784 * if this becomes a bottleneck, there are ways to
785 * improve parallelism for this code path
786 * -- tkeiser 11/28/2007
788 VCreateReservation_r(vp
);
789 VWaitExclusiveState_r(vp
);
790 vol_state_save
= VChangeState_r(vp
, VOL_STATE_VNODE_ALLOC
);
796 Log("VAllocVnode: can't open index file!\n");
798 goto error_encountered
;
800 if ((size
= FDH_SIZE(fdP
)) < 0) {
801 Log("VAllocVnode: can't stat index file!\n");
803 goto error_encountered
;
805 if (off
+ vcp
->diskSize
<= size
) {
806 if (FDH_PREAD(fdP
, &vnp
->disk
, vcp
->diskSize
, off
) != vcp
->diskSize
) {
807 Log("VAllocVnode: can't read index file!\n");
809 goto error_encountered
;
811 if (vnp
->disk
.type
!= vNull
) {
812 Log("VAllocVnode: addled bitmap or index!\n");
814 goto error_encountered
;
817 /* growing file - grow in a reasonable increment */
818 char *buf
= malloc(16 * 1024);
820 Log("VAllocVnode: can't grow vnode index: out of memory\n");
822 goto error_encountered
;
824 memset(buf
, 0, 16 * 1024);
825 if ((FDH_PWRITE(fdP
, buf
, 16 * 1024, off
)) != 16 * 1024) {
826 Log("VAllocVnode: can't grow vnode index: write failed\n");
829 goto error_encountered
;
836 #ifdef AFS_DEMAND_ATTACH_FS
837 VChangeState_r(vp
, vol_state_save
);
838 VCancelReservation_r(vp
);
845 * close the file handle
847 * invalidate the vnode
848 * free up the bitmap entry (although salvager should take care of it)
850 * drop vnode lock and refs
855 VFreeBitMapEntry_r(&tmp
, vp
, &vp
->vnodeIndex
[class], bitNumber
, 0 /*flags*/);
856 VInvalidateVnode_r(vnp
);
857 VnUnlock(vnp
, WRITE_LOCK
);
858 VnCancelReservation_r(vnp
);
859 #ifdef AFS_DEMAND_ATTACH_FS
860 VRequestSalvage_r(ec
, vp
, SALVSYNC_ERROR
, 0);
861 VCancelReservation_r(vp
);
863 VForceOffline_r(vp
, 0);
868 VNLog(4, 2, vnodeNumber
, (intptr_t)vnp
, 0, 0);
869 #ifndef AFS_DEMAND_ATTACH_FS
874 VNLog(5, 1, (intptr_t)vnp
, 0, 0, 0);
875 memset(&vnp
->disk
, 0, sizeof(vnp
->disk
));
876 vnp
->changed_newTime
= 0; /* set this bit when vnode is updated */
877 vnp
->changed_oldTime
= 0; /* set this on CopyOnWrite. */
879 vnp
->disk
.vnodeMagic
= vcp
->magic
;
880 vnp
->disk
.type
= type
;
881 vnp
->disk
.uniquifier
= unique
;
885 #ifdef AFS_DEMAND_ATTACH_FS
886 VnChangeState_r(vnp
, VN_STATE_EXCLUSIVE
);
892 * load a vnode from disk.
894 * @param[out] ec client error code return
895 * @param[in] vp volume object pointer
896 * @param[in] vnp vnode object pointer
897 * @param[in] vcp vnode class info object pointer
898 * @param[in] class vnode class enumeration
900 * @pre vnode is registered in appropriate data structures;
901 * caller holds a ref on vnode; VOL_LOCK is held
903 * @post vnode data is loaded from disk.
904 * vnode state is set to VN_STATE_ONLINE.
905 * on failure, vnode is invalidated.
907 * @internal vnode package internal use only
910 VnLoad(Error
* ec
, Volume
* vp
, Vnode
* vnp
,
911 struct VnodeClassInfo
* vcp
, VnodeClass
class)
913 /* vnode not cached */
917 IHandle_t
*ihP
= vp
->vnodeIndex
[class].handle
;
924 #ifdef AFS_DEMAND_ATTACH_FS
925 VnChangeState_r(vnp
, VN_STATE_LOAD
);
928 /* This will never block */
929 VnLock(vnp
, WRITE_LOCK
, VOL_LOCK_HELD
, WILL_NOT_DEADLOCK
);
934 Log("VnLoad: can't open index dev=%u, i=%s\n", vp
->device
,
935 PrintInode(stmp
, vp
->vnodeIndex
[class].handle
->ih_ino
));
937 goto error_encountered_nolock
;
938 } else if ((nBytes
= FDH_PREAD(fdP
, (char *)&vnp
->disk
, vcp
->diskSize
, vnodeIndexOffset(vcp
, Vn_id(vnp
))))
940 /* Don't take volume off line if the inumber is out of range
941 * or the inode table is full. */
942 if (nBytes
== BAD_IGET
) {
943 Log("VnLoad: bad inumber %s\n",
944 PrintInode(stmp
, vp
->vnodeIndex
[class].handle
->ih_ino
));
947 } else if (nBytes
== -1 && errno
== EIO
) {
948 /* disk error; salvage */
949 Log("VnLoad: Couldn't read vnode %u, volume %" AFS_VOLID_FMT
" (%s); volume needs salvage\n", Vn_id(vnp
), afs_printable_VolumeId_lu(V_id(vp
)), V_name(vp
));
951 /* vnode is not allocated */
952 if (GetLogLevel() >= 5)
953 Log("VnLoad: Couldn't read vnode %u, volume %" AFS_VOLID_FMT
" (%s); read %d bytes, errno %d\n",
954 Vn_id(vnp
), afs_printable_VolumeId_lu(V_id(vp
)), V_name(vp
), (int)nBytes
, errno
);
958 goto error_encountered_nolock
;
963 /* Quick check to see that the data is reasonable */
964 if (vnp
->disk
.vnodeMagic
!= vcp
->magic
|| vnp
->disk
.type
== vNull
) {
965 if (vnp
->disk
.type
== vNull
) {
969 struct vnodeIndex
*index
= &vp
->vnodeIndex
[class];
970 unsigned int bitNumber
= vnodeIdToBitNumber(Vn_id(vnp
));
971 unsigned int offset
= bitNumber
>> 3;
973 #ifdef AFS_DEMAND_ATTACH_FS
974 /* Make sure the volume bitmap isn't getting updated while we are
976 VWaitExclusiveState_r(vp
);
979 /* Test to see if vnode number is valid. */
980 if ((offset
>= index
->bitmapSize
)
981 || ((*(index
->bitmap
+ offset
) & (1 << (bitNumber
& 0x7)))
983 Log("VnLoad: Request for unallocated vnode %u, volume %" AFS_VOLID_FMT
" (%s) denied.\n", Vn_id(vnp
), afs_printable_VolumeId_lu(V_id(vp
)), V_name(vp
));
987 Log("VnLoad: Bad magic number, vnode %u, volume %" AFS_VOLID_FMT
" (%s); volume needs salvage\n", Vn_id(vnp
), afs_printable_VolumeId_lu(V_id(vp
)), V_name(vp
));
990 goto error_encountered
;
993 IH_INIT(vnp
->handle
, V_device(vp
), afs_printable_VolumeId_lu(V_parentId(vp
)), VN_GET_INO(vnp
));
994 VnUnlock(vnp
, WRITE_LOCK
);
995 #ifdef AFS_DEMAND_ATTACH_FS
996 VnChangeState_r(vnp
, VN_STATE_ONLINE
);
1001 error_encountered_nolock
:
1003 FDH_REALLYCLOSE(fdP
);
1009 #ifdef AFS_DEMAND_ATTACH_FS
1010 VRequestSalvage_r(&error
, vp
, SALVSYNC_ERROR
, 0);
1012 VForceOffline_r(vp
, 0);
1019 VInvalidateVnode_r(vnp
);
1020 VnUnlock(vnp
, WRITE_LOCK
);
1024 * store a vnode to disk.
1026 * @param[out] ec error code output
1027 * @param[in] vp volume object pointer
1028 * @param[in] vnp vnode object pointer
1029 * @param[in] vcp vnode class info object pointer
1030 * @param[in] class vnode class enumeration
1032 * @pre VOL_LOCK held.
1033 * caller holds refs to volume and vnode.
1034 * DAFS: caller is responsible for performing state sanity checks.
1036 * @post vnode state is stored to disk.
1038 * @internal vnode package internal use only
1041 VnStore(Error
* ec
, Volume
* vp
, Vnode
* vnp
,
1042 struct VnodeClassInfo
* vcp
, VnodeClass
class)
1046 IHandle_t
*ihP
= vp
->vnodeIndex
[class].handle
;
1049 #ifdef AFS_DEMAND_ATTACH_FS
1050 VnState vn_state_save
;
1055 #ifdef AFS_DEMAND_ATTACH_FS
1056 vn_state_save
= VnChangeState_r(vnp
, VN_STATE_STORE
);
1059 offset
= vnodeIndexOffset(vcp
, Vn_id(vnp
));
1063 Log("VnStore: can't open index file!\n");
1064 goto error_encountered
;
1066 nBytes
= FDH_PWRITE(fdP
, &vnp
->disk
, vcp
->diskSize
, offset
);
1067 if (nBytes
!= vcp
->diskSize
) {
1068 /* Don't force volume offline if the inumber is out of
1069 * range or the inode table is full.
1071 FDH_REALLYCLOSE(fdP
);
1072 if (nBytes
== BAD_IGET
) {
1073 Log("VnStore: bad inumber %s\n",
1075 vp
->vnodeIndex
[class].handle
->ih_ino
));
1078 #ifdef AFS_DEMAND_ATTACH_FS
1079 VnChangeState_r(vnp
, VN_STATE_ERROR
);
1082 Log("VnStore: Couldn't write vnode %u, volume %" AFS_VOLID_FMT
" (%s) (error %d)\n", Vn_id(vnp
), afs_printable_VolumeId_lu(V_id(Vn_volume(vnp
))), V_name(Vn_volume(vnp
)), (int)nBytes
);
1083 #ifdef AFS_DEMAND_ATTACH_FS
1084 goto error_encountered
;
1087 VForceOffline_r(vp
, 0);
1097 #ifdef AFS_DEMAND_ATTACH_FS
1098 VnChangeState_r(vnp
, vn_state_save
);
1103 #ifdef AFS_DEMAND_ATTACH_FS
1104 /* XXX instead of dumping core, let's try to request a salvage
1105 * and just fail the putvnode */
1109 VnChangeState_r(vnp
, VN_STATE_ERROR
);
1110 VRequestSalvage_r(ec
, vp
, SALVSYNC_ERROR
, 0);
1117 * get a handle to a vnode object.
1119 * @param[out] ec error code
1120 * @param[in] vp volume object
1121 * @param[in] vnodeNumber vnode id
1122 * @param[in] locktype type of lock to acquire
1124 * @return vnode object pointer
1129 VGetVnode(Error
* ec
, Volume
* vp
, VnodeId vnodeNumber
, int locktype
)
1130 { /* READ_LOCK or WRITE_LOCK, as defined in lock.h */
1133 retVal
= VGetVnode_r(ec
, vp
, vnodeNumber
, locktype
);
1139 * get a handle to a vnode object.
1141 * @param[out] ec error code
1142 * @param[in] vp volume object
1143 * @param[in] vnodeNumber vnode id
1144 * @param[in] locktype type of lock to acquire
1146 * @return vnode object pointer
1148 * @internal vnode package internal use only
1150 * @pre VOL_LOCK held.
1151 * heavyweight ref held on volume object.
1154 VGetVnode_r(Error
* ec
, Volume
* vp
, VnodeId vnodeNumber
, int locktype
)
1155 { /* READ_LOCK or WRITE_LOCK, as defined in lock.h */
1158 struct VnodeClassInfo
*vcp
;
1162 if (vnodeNumber
== 0) {
1167 VNLog(100, 1, vnodeNumber
, 0, 0, 0);
1169 #ifdef AFS_DEMAND_ATTACH_FS
1171 * once a volume has entered an error state, don't permit
1172 * further operations to proceed
1173 * -- tkeiser 11/21/2007
1175 VWaitExclusiveState_r(vp
);
1176 if (VIsErrorState(V_attachState(vp
))) {
1177 /* XXX is VSALVAGING acceptable here? */
1183 if (programType
== fileServer
&& !V_inUse(vp
)) {
1184 *ec
= (vp
->specialStatus
? vp
->specialStatus
: VOFFLINE
);
1186 /* If the volume is VBUSY (being cloned or dumped) and this is
1187 * a READ operation, then don't fail.
1189 if ((*ec
!= VBUSY
) || (locktype
!= READ_LOCK
)) {
1194 class = vnodeIdToClass(vnodeNumber
);
1195 vcp
= &VnodeClassInfo
[class];
1196 if (locktype
== WRITE_LOCK
&& !VolumeWriteable(vp
)) {
1197 *ec
= (bit32
) VREADONLY
;
1201 if (locktype
== WRITE_LOCK
&& programType
== fileServer
) {
1202 VAddToVolumeUpdateList_r(ec
, vp
);
1210 /* See whether the vnode is in the cache. */
1211 vnp
= VLookupVnode(vp
, vnodeNumber
);
1213 /* vnode is in cache */
1215 VNLog(101, 2, vnodeNumber
, (intptr_t)vnp
, 0, 0);
1216 VnCreateReservation_r(vnp
);
1218 #ifdef AFS_DEMAND_ATTACH_FS
1220 * this is the one DAFS case where we may run into contention.
1221 * here's the basic control flow:
1223 * if locktype is READ_LOCK:
1224 * wait until vnode is not exclusive
1225 * set to VN_STATE_READ
1226 * increment read count
1229 * wait until vnode is quiescent
1230 * set to VN_STATE_EXCLUSIVE
1233 if (locktype
== READ_LOCK
) {
1234 VnWaitExclusiveState_r(vnp
);
1236 VnWaitQuiescent_r(vnp
);
1239 if (VnIsErrorState(Vn_state(vnp
))) {
1240 VnCancelReservation_r(vnp
);
1244 #endif /* AFS_DEMAND_ATTACH_FS */
1246 /* vnode not cached */
1248 /* Not in cache; tentatively grab most distantly used one from the LRU
1251 vnp
= VGetFreeVnode_r(vcp
, vp
, vnodeNumber
);
1254 vnp
->changed_newTime
= vnp
->changed_oldTime
= 0;
1258 * XXX for non-DAFS, there is a serious
1259 * race condition here:
1261 * two threads can race to load a vnode. the net
1262 * result is two struct Vnodes can be allocated
1263 * and hashed, which point to the same underlying
1264 * disk data store. conflicting vnode locks can
1265 * thus be held concurrently.
1267 * for non-DAFS to be safe, VOL_LOCK really shouldn't
1268 * be dropped in VnLoad. Of course, this would likely
1269 * lead to an unacceptable slow-down.
1272 VnLoad(ec
, vp
, vnp
, vcp
, class);
1274 VnCancelReservation_r(vnp
);
1277 #ifndef AFS_DEMAND_ATTACH_FS
1282 * there is no possibility for contention. we "own" this vnode.
1288 * it is imperative that nothing drop vol lock between here
1289 * and the VnBeginRead/VnChangeState stanza below
1292 VnLock(vnp
, locktype
, VOL_LOCK_HELD
, MIGHT_DEADLOCK
);
1294 /* Check that the vnode hasn't been removed while we were obtaining
1296 VNLog(102, 2, vnodeNumber
, (intptr_t) vnp
, 0, 0);
1297 if ((vnp
->disk
.type
== vNull
) || (Vn_cacheCheck(vnp
) == 0)) {
1298 VnUnlock(vnp
, locktype
);
1299 VnCancelReservation_r(vnp
);
1301 /* vnode is labelled correctly by now, so we don't have to invalidate it */
1305 #ifdef AFS_DEMAND_ATTACH_FS
1306 if (locktype
== READ_LOCK
) {
1309 VnChangeState_r(vnp
, VN_STATE_EXCLUSIVE
);
1313 if (programType
== fileServer
)
1314 VBumpVolumeUsage_r(Vn_volume(vnp
)); /* Hack; don't know where it should be
1315 * called from. Maybe VGetVolume */
int TrustVnodeCacheEntry = 1;
/* This variable is bogus--when it's set to 0, the hash chains fill
 * up with multiple versions of the same vnode.  Should fix this!! */
1324 VPutVnode(Error
* ec
, Vnode
* vnp
)
1327 VPutVnode_r(ec
, vnp
);
1332 * put back a handle to a vnode object.
1334 * @param[out] ec client error code
1335 * @param[in] vnp vnode object pointer
1337 * @pre VOL_LOCK held.
1338 * ref held on vnode.
1340 * @post ref dropped on vnode.
1341 * if vnode was modified or deleted, it is written out to disk
1342 * (assuming a write lock was held).
1344 * @internal volume package internal use only
1347 VPutVnode_r(Error
* ec
, Vnode
* vnp
)
1351 struct VnodeClassInfo
*vcp
;
1354 opr_Assert(Vn_refcount(vnp
) != 0);
1355 class = vnodeIdToClass(Vn_id(vnp
));
1356 vcp
= &VnodeClassInfo
[class];
1357 opr_Assert(vnp
->disk
.vnodeMagic
== vcp
->magic
);
1358 VNLog(200, 2, Vn_id(vnp
), (intptr_t) vnp
, 0, 0);
1360 #ifdef AFS_DEMAND_ATTACH_FS
1361 writeLocked
= (Vn_state(vnp
) == VN_STATE_EXCLUSIVE
);
1363 writeLocked
= WriteLocked(&vnp
->lock
);
1368 #ifdef AFS_PTHREAD_ENV
1369 pthread_t thisProcess
= pthread_self();
1370 #else /* AFS_PTHREAD_ENV */
1371 PROCESS thisProcess
;
1372 LWP_CurrentProcess(&thisProcess
);
1373 #endif /* AFS_PTHREAD_ENV */
1374 VNLog(201, 2, (intptr_t) vnp
,
1375 ((vnp
->changed_newTime
) << 1) | ((vnp
->
1376 changed_oldTime
) << 1) | vnp
->
1378 if (thisProcess
!= vnp
->writer
)
1379 Abort("VPutVnode: Vnode at %"AFS_PTR_FMT
" locked by another process!\n",
1383 if (vnp
->changed_oldTime
|| vnp
->changed_newTime
|| vnp
->delete) {
1384 Volume
*vp
= Vn_volume(vnp
);
1385 afs_uint32 now
= FT_ApproxTime();
1386 opr_Assert(Vn_cacheCheck(vnp
) == vp
->cacheCheck
);
1389 /* No longer any directory entries for this vnode. Free the Vnode */
1390 memset(&vnp
->disk
, 0, sizeof(vnp
->disk
));
1391 /* delete flag turned off further down */
1392 VNLog(202, 2, Vn_id(vnp
), (intptr_t) vnp
, 0, 0);
1393 } else if (vnp
->changed_newTime
) {
1394 vnp
->disk
.serverModifyTime
= now
;
1396 if (vnp
->changed_newTime
)
1398 V_updateDate(vp
) = vp
->updateTime
= now
;
1399 if(V_volUpdateCounter(vp
)< UINT_MAX
)
1400 V_volUpdateCounter(vp
)++;
1403 /* The vnode has been changed. Write it out to disk */
1405 #ifdef AFS_DEMAND_ATTACH_FS
1406 VRequestSalvage_r(ec
, vp
, SALVSYNC_ERROR
, 0);
1408 opr_Assert(V_needsSalvaged(vp
));
1412 VnStore(ec
, vp
, vnp
, vcp
, class);
1414 /* If the vnode is to be deleted, and we wrote the vnode out,
1415 * free its bitmap entry. Do after the vnode is written so we
1416 * don't allocate from bitmap before the vnode is written
1417 * (doing so could cause a "addled bitmap" message).
1419 if (vnp
->delete && !*ec
) {
1420 if (V_filecount(Vn_volume(vnp
))-- < 1)
1421 V_filecount(Vn_volume(vnp
)) = 0;
1422 VFreeBitMapEntry_r(ec
, vp
, &vp
->vnodeIndex
[class],
1423 vnodeIdToBitNumber(Vn_id(vnp
)),
1424 VOL_FREE_BITMAP_WAIT
);
1428 vnp
->changed_newTime
= vnp
->changed_oldTime
= 0;
1430 #ifdef AFS_DEMAND_ATTACH_FS
1431 VnChangeState_r(vnp
, VN_STATE_ONLINE
);
1433 } else { /* Not write locked */
1434 if (vnp
->changed_newTime
|| vnp
->changed_oldTime
|| vnp
->delete)
1436 ("VPutVnode: Change or delete flag for vnode "
1437 "%"AFS_PTR_FMT
" is set but vnode is not write locked!\n",
1439 #ifdef AFS_DEMAND_ATTACH_FS
1444 /* Do not look at disk portion of vnode after this point; it may
1445 * have been deleted above */
1447 VnUnlock(vnp
, ((writeLocked
) ? WRITE_LOCK
: READ_LOCK
));
1448 VnCancelReservation_r(vnp
);
1452 * Make an attempt to convert a vnode lock from write to read.
1453 * Do nothing if the vnode isn't write locked or the vnode has
1457 VVnodeWriteToRead(Error
* ec
, Vnode
* vnp
)
1461 retVal
= VVnodeWriteToRead_r(ec
, vnp
);
1467 * convert vnode handle from mutually exclusive to shared access.
1469 * @param[out] ec client error code
1470 * @param[in] vnp vnode object pointer
1472 * @return unspecified use (see out argument 'ec' for error code return)
1474 * @pre VOL_LOCK held.
1475 * ref held on vnode.
1476 * write lock held on vnode.
1478 * @post read lock held on vnode.
1479 * if vnode was modified, it has been written to disk.
1481 * @internal volume package internal use only
1484 VVnodeWriteToRead_r(Error
* ec
, Vnode
* vnp
)
1488 struct VnodeClassInfo
*vcp
;
1489 #ifdef AFS_PTHREAD_ENV
1490 pthread_t thisProcess
;
1491 #else /* AFS_PTHREAD_ENV */
1492 PROCESS thisProcess
;
1493 #endif /* AFS_PTHREAD_ENV */
1496 opr_Assert(Vn_refcount(vnp
) != 0);
1497 class = vnodeIdToClass(Vn_id(vnp
));
1498 vcp
= &VnodeClassInfo
[class];
1499 opr_Assert(vnp
->disk
.vnodeMagic
== vcp
->magic
);
1500 VNLog(300, 2, Vn_id(vnp
), (intptr_t) vnp
, 0, 0);
1502 #ifdef AFS_DEMAND_ATTACH_FS
1503 writeLocked
= (Vn_state(vnp
) == VN_STATE_EXCLUSIVE
);
1505 writeLocked
= WriteLocked(&vnp
->lock
);
1512 VNLog(301, 2, (intptr_t) vnp
,
1513 ((vnp
->changed_newTime
) << 1) | ((vnp
->
1514 changed_oldTime
) << 1) | vnp
->
1518 #ifdef AFS_PTHREAD_ENV
1519 thisProcess
= pthread_self();
1520 #else /* AFS_PTHREAD_ENV */
1521 LWP_CurrentProcess(&thisProcess
);
1522 #endif /* AFS_PTHREAD_ENV */
1523 if (thisProcess
!= vnp
->writer
)
1524 Abort("VPutVnode: Vnode at %"AFS_PTR_FMT
1525 " locked by another process!\n", vnp
);
1530 if (vnp
->changed_oldTime
|| vnp
->changed_newTime
) {
1531 Volume
*vp
= Vn_volume(vnp
);
1532 afs_uint32 now
= FT_ApproxTime();
1533 opr_Assert(Vn_cacheCheck(vnp
) == vp
->cacheCheck
);
1534 if (vnp
->changed_newTime
)
1535 vnp
->disk
.serverModifyTime
= now
;
1536 if (vnp
->changed_newTime
)
1537 V_updateDate(vp
) = vp
->updateTime
= now
;
1539 /* The inode has been changed. Write it out to disk */
1541 #ifdef AFS_DEMAND_ATTACH_FS
1542 VRequestSalvage_r(ec
, vp
, SALVSYNC_ERROR
, 0);
1544 opr_Assert(V_needsSalvaged(vp
));
1548 VnStore(ec
, vp
, vnp
, vcp
, class);
1551 vnp
->changed_newTime
= vnp
->changed_oldTime
= 0;
1555 #ifdef AFS_DEMAND_ATTACH_FS
1556 VnChangeState_r(vnp
, VN_STATE_ONLINE
);
1559 ConvertWriteToReadLock(&vnp
->lock
);
1565 * initial size of ihandle pointer vector.
1567 * @see VInvalidateVnodesByVolume_r
1569 #define IH_VEC_BASE_SIZE 256
1572 * increment amount for growing ihandle pointer vector.
1574 * @see VInvalidateVnodesByVolume_r
1576 #define IH_VEC_INCREMENT 256
1579 * Compile list of ihandles to be released/reallyclosed at a later time.
1581 * @param[in] vp volume object pointer
1582 * @param[out] vec_out vector of ihandle pointers to be released/reallyclosed
1583 * @param[out] vec_len_out number of valid elements in ihandle vector
1585 * @pre - VOL_LOCK is held
1586 * - volume is in appropriate exclusive state (e.g. VOL_STATE_VNODE_CLOSE,
1587 * VOL_STATE_VNODE_RELEASE)
1589 * @post - all vnodes on VVn list are invalidated
1590 * - ih_vec is populated with all valid ihandles
1592 * @return operation status
1594 * @retval ENOMEM out of memory
1596 * @todo we should handle out of memory conditions more gracefully.
1598 * @internal vnode package internal use only
1601 VInvalidateVnodesByVolume_r(Volume
* vp
,
1602 IHandle_t
*** vec_out
,
1603 size_t * vec_len_out
)
1607 size_t i
= 0, vec_len
;
1608 IHandle_t
**ih_vec
, **ih_vec_new
;
1610 #ifdef AFS_DEMAND_ATTACH_FS
1612 #endif /* AFS_DEMAND_ATTACH_FS */
1614 vec_len
= IH_VEC_BASE_SIZE
;
1615 ih_vec
= malloc(sizeof(IHandle_t
*) * vec_len
);
1616 #ifdef AFS_DEMAND_ATTACH_FS
1623 * Traverse the volume's vnode list. Pull all the ihandles out into a
1624 * thread-private array for later asynchronous processing.
1626 #ifdef AFS_DEMAND_ATTACH_FS
1629 for (queue_Scan(&vp
->vnode_list
, vnp
, nvnp
, Vnode
)) {
1630 if (vnp
->handle
!= NULL
) {
1632 #ifdef AFS_DEMAND_ATTACH_FS
1635 vec_len
+= IH_VEC_INCREMENT
;
1636 ih_vec_new
= realloc(ih_vec
, sizeof(IHandle_t
*) * vec_len
);
1637 #ifdef AFS_DEMAND_ATTACH_FS
1640 if (ih_vec_new
== NULL
) {
1644 ih_vec
= ih_vec_new
;
1645 #ifdef AFS_DEMAND_ATTACH_FS
1647 * Theoretically, the volume's VVn list should not change
1648 * because the volume is in an exclusive state. For the
1649 * sake of safety, we will restart the traversal from the
1650 * the beginning (which is not expensive because we're
1651 * deleting the items from the list as we go).
1653 goto restart_traversal
;
1656 ih_vec
[i
++] = vnp
->handle
;
1659 DeleteFromVVnList(vnp
);
1660 VInvalidateVnode_r(vnp
);
1670 /* VCloseVnodeFiles - called when a volume is going off line. All open
1671 * files for vnodes in that volume are closed. This might be excessive,
1672 * since we may only be taking one volume of a volume group offline.
1675 VCloseVnodeFiles_r(Volume
* vp
)
1677 #ifdef AFS_DEMAND_ATTACH_FS
1678 VolState vol_state_save
;
1680 IHandle_t
** ih_vec
;
1683 #ifdef AFS_DEMAND_ATTACH_FS
1684 vol_state_save
= VChangeState_r(vp
, VOL_STATE_VNODE_CLOSE
);
1685 #endif /* AFS_DEMAND_ATTACH_FS */
1687 /* XXX need better error handling here */
1688 opr_Verify(VInvalidateVnodesByVolume_r(vp
, &ih_vec
,
1693 * now we drop VOL_LOCK while we perform some potentially very
1694 * expensive operations in the background
1696 #ifdef AFS_DEMAND_ATTACH_FS
1700 for (i
= 0; i
< vec_len
; i
++) {
1701 IH_REALLYCLOSE(ih_vec
[i
]);
1702 IH_RELEASE(ih_vec
[i
]);
1707 #ifdef AFS_DEMAND_ATTACH_FS
1709 VChangeState_r(vp
, vol_state_save
);
1710 #endif /* AFS_DEMAND_ATTACH_FS */
1715 * shut down all vnode cache state for a given volume.
1717 * @param[in] vp volume object pointer
1719 * @pre VOL_LOCK is held
1721 * @post all file descriptors closed.
1722 * all inode handles released.
1723 * all vnode cache objects disassociated from volume.
1725 * @note for DAFS, these operations are performed outside the vol glock under
1726 * volume exclusive state VOL_STATE_VNODE_RELEASE. Please further note
1727 * that it would be a bug to acquire and release a volume reservation
1728 * during this exclusive operation. This is due to the fact that we are
1729 * generally called during the refcount 1->0 transition.
1731 * @todo we should handle failures in VInvalidateVnodesByVolume_r more
1734 * @see VInvalidateVnodesByVolume_r
1736 * @internal this routine is internal to the volume package
1739 VReleaseVnodeFiles_r(Volume
* vp
)
1741 #ifdef AFS_DEMAND_ATTACH_FS
1742 VolState vol_state_save
;
1744 IHandle_t
** ih_vec
;
1747 #ifdef AFS_DEMAND_ATTACH_FS
1748 vol_state_save
= VChangeState_r(vp
, VOL_STATE_VNODE_RELEASE
);
1749 #endif /* AFS_DEMAND_ATTACH_FS */
1751 /* XXX need better error handling here */
1752 opr_Verify(VInvalidateVnodesByVolume_r(vp
, &ih_vec
,
1757 * now we drop VOL_LOCK while we perform some potentially very
1758 * expensive operations in the background
1760 #ifdef AFS_DEMAND_ATTACH_FS
1764 for (i
= 0; i
< vec_len
; i
++) {
1765 IH_RELEASE(ih_vec
[i
]);
1770 #ifdef AFS_DEMAND_ATTACH_FS
1772 VChangeState_r(vp
, vol_state_save
);
1773 #endif /* AFS_DEMAND_ATTACH_FS */