/*
 * Copyright 2000, International Business Machines Corporation and others.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/*
 * Implements:
 * afs_FlushActiveVcaches
 * afs_WriteVCacheDiscon
 */

#include <afsconfig.h>
#include "afs/param.h"

#include "afs/sysincludes.h"	/*Standard vendor system headers */
#include "afsincludes.h"	/*AFS-based standard headers */
#include "afs/afs_stats.h"
#include "afs/afs_cbqueue.h"
#include "afs/afs_osidnlc.h"
afs_int32 afs_maxvcount = 0;	/* max number of vcache entries */
afs_int32 afs_vcount = 0;	/* number of vcache in use now */
#endif /* AFS_SGI64_ENV */
/* Exported variables */
afs_rwlock_t afs_xvcdirty;	/*Lock: discon vcache dirty list mgmt */
afs_rwlock_t afs_xvcache;	/*Lock: alloc new stat cache entries */
afs_rwlock_t afs_xvreclaim;	/*Lock: entries reclaimed, not on free list */
afs_lock_t afs_xvcb;		/*Lock: fids on which there are callbacks */
#if !defined(AFS_LINUX22_ENV)
static struct vcache *freeVCList;	/*Free list for stat cache entries */
struct vcache *ReclaimedVCList;		/*Reclaimed list for stat entries */
static struct vcache *Initial_freeVCList;	/*Initial list for above */
struct afs_q VLRU;		/*vcache LRU */
afs_int32 vcachegen = 0;
unsigned int afs_paniconwarn = 0;
struct vcache *afs_vhashT[VCSIZE];
struct afs_q afs_vhashTV[VCSIZE];
static struct afs_cbr *afs_cbrHashT[CBRSIZE];
afs_int32 afs_bulkStatsLost;
int afs_norefpanic = 0;
/* Disk backed vcache definitions
 * Both protected by xvcache */
static int afs_nextVcacheSlot = 0;
static struct afs_slotlist *afs_freeSlotList = NULL;
/* Forward declarations */
static afs_int32 afs_QueueVCB(struct vcache *avc, int *slept);
/*
 * The PFlush algorithm makes use of the fact that Fid.Unique is not used in
 * below hash algorithms.  Change it if need be so that flushing algorithm
 * doesn't move things from one hash chain to another.
 */
/* Don't hash on the cell; our callback-breaking code sometimes fails to compute
 * the cell correctly, and only scans one hash bucket. */
int VCHash(struct VenusFid *fid)
{
    return opr_jhash_int2(fid->Fid.Volume, fid->Fid.Vnode, 0) &
	opr_jhash_mask(VCSIZEBITS);
}

/* Hash only on volume to speed up volume callbacks. */
int VCHashV(struct VenusFid *fid)
{
    return opr_jhash_int(fid->Fid.Volume, 0) & opr_jhash_mask(VCSIZEBITS);
}
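/*
 * A typical lookup walks the VCHash chain and compares full fids -- a sketch
 * of the idiom used elsewhere in this file (e.g. afs_GetRootVCache):
 *
 *     for (tvc = afs_vhashT[VCHash(&fid)]; tvc; tvc = tvc->hnext)
 *         if (!FidCmp(&tvc->f.fid, &fid))
 *             break;
 *
 * Because Fid.Unique feeds neither hash, code that fixes up Unique in place
 * never has to move an entry between chains.
 */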
/*!
 * Generate an index into the hash table for a given Fid.
 *
 * \return The hash value.
 */
static int
afs_HashCBRFid(struct AFSFid *fid)
{
    return (fid->Volume + fid->Vnode + fid->Unique) % CBRSIZE;
}
/*!
 * Insert a CBR entry into the hash table.
 * Must be called with afs_xvcb held.
 */
static void
afs_InsertHashCBR(struct afs_cbr *cbr)
{
    int slot = afs_HashCBRFid(&cbr->fid);

    cbr->hash_next = afs_cbrHashT[slot];
    if (afs_cbrHashT[slot])
	afs_cbrHashT[slot]->hash_pprev = &cbr->hash_next;

    cbr->hash_pprev = &afs_cbrHashT[slot];
    afs_cbrHashT[slot] = cbr;
}
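/* New entries are pushed at the head of the chain; hash_pprev points back at
 * whatever pointer references the entry, so afs_FreeCBR() below can unlink it
 * in O(1) without re-walking the chain. */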
/*!
 * Flush the given vcache entry.
 *
 * afs_xvcache lock must be held for writing upon entry to
 * prevent people from changing the vrefCount field, and to
 * protect the lruq and hnext fields.
 * LOCK: afs_FlushVCache afs_xvcache W
 * REFCNT: vcache ref count must be zero on entry except for osf1
 * RACE: lock is dropped and reobtained, permitting race in caller
 *
 * \param avc Pointer to vcache entry to flush.
 * \param slept Pointer to int to set 1 if we sleep/drop locks, 0 if we don't.
 */
int
afs_FlushVCache(struct vcache *avc, int *slept)
{				/*afs_FlushVCache */

    struct vcache **uvc, *wvc;

    /* NOTE: We must have nothing drop afs_xvcache until we have removed all
     * possible references to this vcache. This means all hash tables, queues,
     * ... */

    AFS_STATCNT(afs_FlushVCache);
    afs_Trace2(afs_iclSetp, CM_TRACE_FLUSHV, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_INT32, avc->f.states);

    code = osi_VM_FlushVCache(avc);

    if (avc->f.states & CVFlushed) {

#if !defined(AFS_LINUX22_ENV)
    if (avc->nextfree || !avc->vlruq.prev || !avc->vlruq.next) {	/* qv afs.h */
	refpanic("LRU vs. Free inconsistency");
    }
#endif
    avc->f.states |= CVFlushed;

    /* pull the entry out of the lruq and put it on the free list */
    QRemove(&avc->vlruq);

    /* keep track of # of files that we bulk stat'd, but never used
     * before they got recycled. */
    if (avc->f.states & CBulkStat)

    /* remove entry from the hash chain */
    i = VCHash(&avc->f.fid);
    uvc = &afs_vhashT[i];
    for (wvc = *uvc; wvc; uvc = &wvc->hnext, wvc = *uvc) {

    /* remove entry from the volume hash table */
    QRemove(&avc->vhashq);

#if defined(AFS_LINUX26_ENV)
    {
	struct pagewriter *pw, *store;
	struct list_head tofree;

	INIT_LIST_HEAD(&tofree);
	spin_lock(&avc->pagewriter_lock);
	list_for_each_entry_safe(pw, store, &avc->pagewriters, link) {
	    /* afs_osi_Free may sleep so we need to defer it */
	    list_add_tail(&pw->link, &tofree);
	}
	spin_unlock(&avc->pagewriter_lock);
	list_for_each_entry_safe(pw, store, &tofree, link) {
	    afs_osi_Free(pw, sizeof(struct pagewriter));
	}
    }
#endif
    if (avc->mvid.target_root)
	osi_FreeSmallSpace(avc->mvid.target_root);
    avc->mvid.target_root = NULL;
    if (avc->linkData) {
	afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
	avc->linkData = NULL;
    }
#if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
    /* OK, there are no internal vrefCounts, so there shouldn't
     * be any more refs here. */
#ifdef AFS_DARWIN80_ENV
    vnode_clearfsnode(AFSTOV(avc));
    vnode_removefsref(AFSTOV(avc));
#else
    avc->v->v_data = NULL;	/* remove from vnode */
    AFSTOV(avc) = NULL;		/* also drop the ptr to vnode */
#endif
#endif

#ifdef AFS_SUN511_ENV
#elif defined(AFS_SUN510_ENV)
    /* As we use private vnodes, cleanup is up to us */
    vn_reinit(AFSTOV(avc));
#endif
    afs_FreeAllAxs(&(avc->Access));
    afs_StaleVCacheFlags(avc, AFS_STALEVC_FILENAME, CUnique);

    /* By this point, the vcache has been removed from all global structures
     * via which someone could try to use the vcache. It is okay to drop
     * afs_xvcache at this point (if *slept is set). */

    if (afs_shuttingdown == AFS_RUNNING)
	afs_QueueVCB(avc, slept);

    /*
     * Next, keep track of which vnodes we've deleted for create's
     * optimistic synchronization algorithm
     */
    if (avc->f.fid.Fid.Vnode & 1)

#if !defined(AFS_LINUX22_ENV)
    /* put the entry in the free list */
    avc->nextfree = freeVCList;
    freeVCList = avc;
    if (avc->vlruq.prev || avc->vlruq.next) {
	refpanic("LRU vs. Free inconsistency");
    }
    avc->f.states |= CVFlushed;
#else
    /* This should put it back on the vnode free list since usecount is 1 */
    if (VREFCOUNT_GT(avc, 0)) {
	AFS_RELE(AFSTOV(avc));
	afs_stats_cmperf.vcacheXAllocs--;
    } else {
	if (afs_norefpanic) {
	    afs_warn("flush vc refcnt < 1");
	} else
	    osi_Panic("flush vc refcnt < 1");
    }
#endif /* AFS_LINUX22_ENV */

}				/*afs_FlushVCache */
/*!
 * The core of the inactive vnode op for all but IRIX.
 */
void
afs_InactiveVCache(struct vcache *avc, afs_ucred_t *acred)
{
    AFS_STATCNT(afs_inactive);
    if (avc->f.states & CDirty) {
	/* we can't keep trying to push back dirty data forever.  Give up. */
	afs_InvalidateAllSegments(avc);	/* turns off dirty bit */
    }
    avc->f.states &= ~CMAPPED;	/* mainly used by SunOS 4.0.x */
    avc->f.states &= ~CDirty;	/* Turn it off */
    if (avc->f.states & CUnlinked) {
	if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
	    avc->f.states |= CUnlinkedDel;
	    return;
	}
	afs_remunlink(avc, 1);	/* ignore any return code */
    }
}
/*!
 * Allocate a callback return structure from the
 * free list and return it.
 *
 * Environment: The alloc and free routines are both called with the afs_xvcb lock
 * held, so we don't have to worry about blocking in osi_Alloc.
 *
 * \return The allocated afs_cbr.
 */
static struct afs_cbr *afs_cbrSpace = 0;
/* if alloc limit below changes, fix me! */
static struct afs_cbr *afs_cbrHeads[16];
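/* afs_cbrSpace heads the free list of callback-return entries; afs_cbrHeads
 * records each block obtained from afs_osi_Alloc so the allocator below can
 * cap itself at 16 blocks of AFS_NCBRS entries. */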
    while (!afs_cbrSpace) {
	if (afs_stats_cmperf.CallBackAlloced >= sizeof(afs_cbrHeads)/sizeof(afs_cbrHeads[0])) {
	    /* don't allocate more than 16 * AFS_NCBRS for now */
	    afs_stats_cmperf.CallBackFlushes++;
	} else {
	    tsp = afs_osi_Alloc(AFS_NCBRS * sizeof(struct afs_cbr));
	    osi_Assert(tsp != NULL);
	    for (i = 0; i < AFS_NCBRS - 1; i++) {
		tsp[i].next = &tsp[i + 1];
	    }
	    tsp[AFS_NCBRS - 1].next = 0;
	    afs_cbrHeads[afs_stats_cmperf.CallBackAlloced] = tsp;
	    afs_stats_cmperf.CallBackAlloced++;
	}
    }

    afs_cbrSpace = tsp->next;
/*!
 * Free a callback return structure, removing it from all lists.
 *
 * Environment: the xvcb lock is held over these calls.
 *
 * \param asp The address of the structure to free.
 */
static void
afs_FreeCBR(struct afs_cbr *asp)
{
    *(asp->pprev) = asp->next;
    if (asp->next)
	asp->next->pprev = asp->pprev;

    *(asp->hash_pprev) = asp->hash_next;
    if (asp->hash_next)
	asp->hash_next->hash_pprev = asp->hash_pprev;

    asp->next = afs_cbrSpace;
    afs_cbrSpace = asp;
}
FlushAllVCBs(int nconns, struct rx_connection **rxconns,
	     struct afs_conn **conns)
{
    results = afs_osi_Alloc(nconns * sizeof (afs_int32));
    osi_Assert(results != NULL);

    multi_Rx(rxconns, nconns)
    {
	multi_RXAFS_GiveUpAllCallBacks();
	results[multi_i] = multi_error;
    } multi_End;
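    /* multi_Rx fans RXAFS_GiveUpAllCallBacks out to all of the supplied
     * connections in parallel; each call's completion code lands in
     * results[], so the cleanup below only unchains CBRs for servers whose
     * RPC actually succeeded. */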
    /*
     * Freeing the CBR will unlink it from the server's CBR list
     * do it here, not in the loop, because a dynamic CBR will call
     * into the memory management routines.
     */
    for ( i = 0 ; i < nconns ; i++ ) {
	if (results[i] == 0) {
	    /* Unchain all of them */
	    while (conns[i]->parent->srvr->server->cbrs)
		afs_FreeCBR(conns[i]->parent->srvr->server->cbrs);
	}
    }
    afs_osi_Free(results, nconns * sizeof(afs_int32));
/*!
 * Flush all queued callbacks to all servers.
 *
 * Environment: holds xvcb lock over RPC to guard against race conditions
 * when a new callback is granted for the same file later on.
 *
 * \return 0 for success.
 */
int
afs_FlushVCBs(afs_int32 lockit)
{
    struct AFSFid *tfids;
    struct AFSCallBack callBacks[1];
    struct AFSCBFids fidArray;
    struct AFSCBs cbArray;
    struct afs_cbr *tcbrp;
    struct vrequest *treq = NULL;
    int safety1, safety2, safety3;

    if (AFS_IS_DISCONNECTED)
	return 0;

    if ((code = afs_CreateReq(&treq, afs_osi_credp)))
	return code;
    treq->flags |= O_NONBLOCK;
    tfids = afs_osi_Alloc(sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
    osi_Assert(tfids != NULL);

    ObtainWriteLock(&afs_xvcb, 273);

    /*
     * First, attempt a multi across everything, all addresses
     * for all servers we know of.
     */
    afs_LoopServers(AFS_LS_ALL, NULL, 0, FlushAllVCBs, NULL);

    ObtainReadLock(&afs_xserver);
    for (i = 0; i < NSERVERS; i++) {
	for (safety1 = 0, tsp = afs_servers[i];
	     tsp && safety1 < afs_totalServers + 10;
	     tsp = tsp->next, safety1++) {
	    if (tsp->cbrs == (struct afs_cbr *)0)
		continue;

	    /* otherwise, grab a block of AFS_MAXCBRSCALL from the list
	     * and make an RPC, over and over again.
	     */
	    tcount = 0;		/* number found so far */
	    for (safety2 = 0; safety2 < afs_cacheStats; safety2++) {
		if (tcount >= AFS_MAXCBRSCALL || !tsp->cbrs) {
		    struct rx_connection *rxconn;
		    /* if buffer is full, or we've queued all we're going
		     * to from this server, we should flush out the
		     * ... */
		    fidArray.AFSCBFids_len = tcount;
		    fidArray.AFSCBFids_val = (struct AFSFid *)tfids;
		    cbArray.AFSCBs_len = 1;
		    cbArray.AFSCBs_val = callBacks;
		    memset(&callBacks[0], 0, sizeof(callBacks[0]));
		    callBacks[0].CallBackType = CB_EXCLUSIVE;
		    for (safety3 = 0; safety3 < AFS_MAXHOSTS * 2; safety3++) {
			tc = afs_ConnByHost(tsp, tsp->cell->fsport,
					    tsp->cell->cellNum, treq, 0,
					    SHARED_LOCK, 0, &rxconn);
			    XSTATS_START_TIME
				(AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
			    RXAFS_GiveUpCallBacks(rxconn, &fidArray,
			} while (afs_Analyze
				 (tc, rxconn, code, 0, treq,
				  AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS, SHARED_LOCK,

		    /* ignore return code, since callbacks may have
		     * been returned anyway, we shouldn't leave them
		     * around to be returned again.
		     *
		     * Next, see if we are done with this server, and if so,
		     * break to deal with the next one.
		     */
		}		/* if to flush full buffer */

		/* if we make it here, we have an entry at the head of cbrs,
		 * which we should copy to the file ID array and then free.
		 */
		tfids[tcount++] = tcbrp->fid;

		/* Freeing the CBR will unlink it from the server's CBR list */
	    }			/* while loop for this one server */
	    if (safety2 > afs_cacheStats) {
		afs_warn("possible internal error afs_flushVCBs (%d)\n",
	    }
	}			/* for loop for this hash chain */
    }				/* loop through all hash chains */
    if (safety1 > afs_totalServers + 2) {
	afs_warn
	    ("AFS internal error (afs_flushVCBs) (%d > %d), continuing...\n",
	     safety1, afs_totalServers + 2);
	if (afs_paniconwarn)
	    osi_Panic("afs_flushVCBS safety1");
    }

    ReleaseReadLock(&afs_xserver);

    ReleaseWriteLock(&afs_xvcb);
    afs_osi_Free(tfids, sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
    afs_DestroyReq(treq);
/*!
 * Queue a callback on the given fid.
 *
 * Locks the xvcb lock.
 * Called when the xvcache lock is already held.
 * RACE: afs_xvcache may be dropped and reacquired
 *
 * \param avc vcache entry
 * \param slept Set to 1 if we dropped afs_xvcache
 * \return 1 if queued, 0 otherwise
 */
static afs_int32
afs_QueueVCB(struct vcache *avc, int *slept)
{
    struct afs_cbr *tcbp;

    AFS_STATCNT(afs_QueueVCB);

    ObtainWriteLock(&afs_xvcb, 274);

    /* we can't really give back callbacks on RO files, since the
     * server only tracks them on a per-volume basis, and we don't
     * know whether we still have some other files from the same
     * ... */
    if (!((avc->f.states & CRO) == 0 && avc->callback)) {

    /* The callback is really just a struct server ptr. */
    tsp = (struct server *)(avc->callback);

    /* If we don't have CBR space, AllocCBR may block or hit the net for
     * clearing up CBRs. Hitting the net may involve a fileserver
     * needing to contact us, so we must drop xvcache so we don't block
     * those requests from going through. */
    reacquire = *slept = 1;
    ReleaseWriteLock(&afs_xvcache);

    /* we now have a pointer to the server, so we just allocate
     * a queue entry and queue it.
     */
    tcbp = afs_AllocCBR();
    tcbp->fid = avc->f.fid.Fid;

    tcbp->next = tsp->cbrs;
    if (tsp->cbrs)
	tsp->cbrs->pprev = &tcbp->next;

    tcbp->pprev = &tsp->cbrs;
    tsp->cbrs = tcbp;
    afs_InsertHashCBR(tcbp);

    /* now release locks and return */
    ReleaseWriteLock(&afs_xvcb);

    /* make sure this is after dropping xvcb, for locking order */
    ObtainWriteLock(&afs_xvcache, 279);
/*!
 * Remove a queued callback for a given Fid.
 *
 * Locks xvcb and xserver locks.
 * Typically called with xdcache, xvcache and/or individual vcache
 * entries locked.
 *
 * \param afid The fid we want cleansed of queued callbacks.
 */
void
afs_RemoveVCB(struct VenusFid *afid)
{
    struct afs_cbr *cbr, *ncbr;

    AFS_STATCNT(afs_RemoveVCB);
    ObtainWriteLock(&afs_xvcb, 275);

    slot = afs_HashCBRFid(&afid->Fid);
    ncbr = afs_cbrHashT[slot];

    while (ncbr) {
	cbr = ncbr;
	ncbr = cbr->hash_next;
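	/* advance before the matching entry is (possibly) freed below, so
	 * removing the current CBR cannot break the walk of the chain */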
	if (afid->Fid.Volume == cbr->fid.Volume &&
	    afid->Fid.Vnode == cbr->fid.Vnode &&
	    afid->Fid.Unique == cbr->fid.Unique) {
	    afs_FreeCBR(cbr);
	}
    }

    ReleaseWriteLock(&afs_xvcb);
}
afs_FlushReclaimedVcaches(void)
{
#if !defined(AFS_LINUX22_ENV)
    struct vcache *tmpReclaimedVCList = NULL;

    ObtainWriteLock(&afs_xvreclaim, 76);
    while (ReclaimedVCList) {
	tvc = ReclaimedVCList;	/* take from free list */
	ReclaimedVCList = tvc->nextfree;
	tvc->nextfree = NULL;
	code = afs_FlushVCache(tvc, &fv_slept);
	if (code) {
	    /* Ok, so, if we got code != 0, uh, wtf do we do? */
	    /* Probably, build a temporary list and then put all back when we
	       get to the end of the list */
	    /* This is actually really crappy, but we need to not leak these.
	       We probably need a way to be smarter about this. */
	    tvc->nextfree = tmpReclaimedVCList;
	    tmpReclaimedVCList = tvc;
	    /* printf("Reclaim list flush %lx failed: %d\n", (unsigned long) tvc, code); */
	}
	if (tvc->f.states & (CVInit
#ifdef AFS_DARWIN80_ENV

	    tvc->f.states &= ~(CVInit
#ifdef AFS_DARWIN80_ENV

	    afs_osi_Wakeup(&tvc->f.states);
	}
    }
    if (tmpReclaimedVCList)
	ReclaimedVCList = tmpReclaimedVCList;

    ReleaseWriteLock(&afs_xvreclaim);
#endif
}
static void
afs_PostPopulateVCache(struct vcache *avc, struct VenusFid *afid, int seq)
{
    /*
     * The proper value for mvstat (for root fids) is setup by the caller.
     */
    avc->mvstat = AFS_MVSTAT_FILE;
    if (afid->Fid.Vnode == 1 && afid->Fid.Unique == 1)
	avc->mvstat = AFS_MVSTAT_ROOT;

    if (afs_globalVFS == 0)
	osi_Panic("afs globalvfs");

    osi_PostPopulateVCache(avc);

    osi_dnlc_purgedp(avc);	/* this may be overkill */
    memset(&(avc->callsort), 0, sizeof(struct afs_q));

    avc->f.states &= ~CVInit;
    if (seq) {
	avc->f.states |= CBulkFetching;
	avc->f.m.Length = seq;
    }
    afs_osi_Wakeup(&avc->f.states);
}
afs_ShakeLooseVCaches(afs_int32 anumber)
{
    struct afs_q *tq, *uq;
    int fv_slept, defersleep = 0;
    afs_int32 target = anumber;

    for (tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
	if (tvc->f.states & CVFlushed) {
	    refpanic("CVFlushed on VLRU");
	} else if (i++ > limit) {
	    afs_warn("afs_ShakeLooseVCaches: i %d limit %d afs_vcount %d afs_maxvcount %d\n",
		     (int)i, limit, (int)afs_vcount, (int)afs_maxvcount);
	    refpanic("Found too many AFS vnodes on VLRU (VLRU cycle?)");
	} else if (QNext(uq) != tq) {
	    refpanic("VLRU inconsistent");
	} else if (tvc->f.states & CVInit) {

	evicted = osi_TryEvictVCache(tvc, &fv_slept, defersleep);

	    /*
	     * This vcache was busy and we slept while trying to evict it.
	     * Move this busy vcache to the head of the VLRU so vcaches
	     * following this busy vcache can be evicted during the retry.
	     */
	    QRemove(&tvc->vlruq);
	    QAdd(&VLRU, &tvc->vlruq);

	    goto retry;		/* start over - may have raced. */

	if (anumber && !defersleep) {

    if (!afsd_dynamic_vcaches && anumber == target) {
	afs_warn("afs_ShakeLooseVCaches: warning none freed, using %d of %d\n",
		 afs_vcount, afs_maxvcount);
    }
/* Alloc new vnode. */
static struct vcache *
afs_AllocVCache(void)
{
    tvc = osi_NewVnode();

    if (afsd_dynamic_vcaches && afs_maxvcount < afs_vcount) {
	afs_maxvcount = afs_vcount;
	/*printf("peak vnodes: %d\n", afs_maxvcount);*/
    }

    afs_stats_cmperf.vcacheXAllocs++;	/* count in case we have a leak */

    /* If we create a new inode, we either give it a new slot number,
     * or if one's available, use a slot number from the slot free list
     */
    if (afs_freeSlotList != NULL) {
	struct afs_slotlist *tmp;

	tvc->diskSlot = afs_freeSlotList->slot;
	tmp = afs_freeSlotList;
	afs_freeSlotList = tmp->next;
	afs_osi_Free(tmp, sizeof(struct afs_slotlist));
    } else {
	tvc->diskSlot = afs_nextVcacheSlot++;
    }

    return tvc;
}
/* Pre populate a newly allocated vcache. On platforms where the actual
 * vnode is attached to the vcache, this function is called before attachment,
 * therefore it cannot perform any actions on the vnode itself */
static void
afs_PrePopulateVCache(struct vcache *avc, struct VenusFid *afid,
		      struct server *serverp) {

    slot = avc->diskSlot;

    osi_PrePopulateVCache(avc);

    avc->diskSlot = slot;
    QZero(&avc->metadirty);

    AFS_RWLOCK_INIT(&avc->lock, "vcache lock");

    memset(&avc->mvid, 0, sizeof(avc->mvid));
    avc->linkData = NULL;

    avc->execsOrWriters = 0;

    avc->f.states = CVInit;
    avc->last_looker = 0;

    avc->asynchrony = -1;

    avc->f.truncPos = AFS_NOTRUNC;	/* don't truncate until we need to */

    afs_SetDataVersion(avc, &zero);	/* in case we copy it into flushDV */

    avc->callback = serverp;	/* to minimize chance that clear
				 * ... */

#if defined(AFS_CACHE_BYPASS)
    avc->cachingStates = 0;
    avc->cachingTransitions = 0;
#endif
}
void
afs_FlushAllVCaches(void)
{
    struct vcache *tvc, *nvc;

    ObtainWriteLock(&afs_xvcache, 867);

    for (i = 0; i < VCSIZE; i++) {
	for (tvc = afs_vhashT[i]; tvc; tvc = nvc) {
	    if (afs_FlushVCache(tvc, &slept)) {
		afs_warn("Failed to flush vcache 0x%lx\n", (unsigned long)(uintptrsz)tvc);
	    }
	}
    }

    ReleaseWriteLock(&afs_xvcache);
}
/*!
 * This routine is responsible for allocating a new cache entry
 * from the free list.  It formats the cache entry and inserts it
 * into the appropriate hash tables.  It must be called with
 * afs_xvcache write-locked so as to prevent several processes from
 * trying to create a new cache entry simultaneously.
 *
 * LOCK: afs_NewVCache  afs_xvcache W
 *
 * \param afid The file id of the file whose cache entry is being created.
 *
 * \return The new vcache struct.
 */
static_inline struct vcache *
afs_NewVCache_int(struct VenusFid *afid, struct server *serverp, int seq)
{
    afs_int32 anumber = VCACHE_FREE;

    AFS_STATCNT(afs_NewVCache);

    afs_FlushReclaimedVcaches();

#if defined(AFS_LINUX22_ENV)
    if (!afsd_dynamic_vcaches && afs_vcount >= afs_maxvcount) {
	afs_ShakeLooseVCaches(anumber);
	if (afs_vcount >= afs_maxvcount) {
	    afs_warn("afs_NewVCache - none freed\n");
	}
    }
    tvc = afs_AllocVCache();
#else /* AFS_LINUX22_ENV */
    /* pull out a free cache entry */
    if (!freeVCList) {
	afs_ShakeLooseVCaches(anumber);
    }

    if (!freeVCList) {
	tvc = afs_AllocVCache();
    } else {
	tvc = freeVCList;	/* take from free list */
	freeVCList = tvc->nextfree;
	tvc->nextfree = NULL;
	afs_vcount++;		/* balanced by FlushVCache */
    }				/* end of if (!freeVCList) */
#endif /* AFS_LINUX22_ENV */

#if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
    if (tvc->v)
	panic("afs_NewVCache(): free vcache with vnode attached");
#endif

    /* Populate the vcache with as much as we can. */
    afs_PrePopulateVCache(tvc, afid, serverp);

    /* Thread the vcache onto the VLRU */
    tvc->hnext = afs_vhashT[i];
    QAdd(&afs_vhashTV[j], &tvc->vhashq);

    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
	refpanic("NewVCache VLRU inconsistent");
    }
    QAdd(&VLRU, &tvc->vlruq);	/* put in lruq */
    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
	refpanic("NewVCache VLRU inconsistent2");
    }
    if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
	refpanic("NewVCache VLRU inconsistent3");
    }
    if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
	refpanic("NewVCache VLRU inconsistent4");
    }
    /* it should now be safe to drop the xvcache lock - so attach an inode
     * to this vcache, where necessary */
    osi_AttachVnode(tvc, seq);

    /* Get a reference count to hold this vcache for the VLRUQ. Note that
     * we have to do this after attaching the vnode, because the reference
     * count may be held in the vnode itself */

#if defined(AFS_LINUX22_ENV)
    /* Hold it for the LRU (should make count 2) */
#elif !(defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV))
    VREFCOUNT_SET(tvc, 1);	/* us */
#endif

#if defined (AFS_FBSD_ENV)
    if (tvc->f.states & CVInit)

    afs_PostPopulateVCache(tvc, afid, seq);

    return tvc;
}				/*afs_NewVCache */
struct vcache *
afs_NewVCache(struct VenusFid *afid, struct server *serverp)
{
    return afs_NewVCache_int(afid, serverp, 0);
}

struct vcache *
afs_NewBulkVCache(struct VenusFid *afid, struct server *serverp, int seq)
{
    return afs_NewVCache_int(afid, serverp, seq);
}
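/* afs_NewVCache is the common entry point; afs_NewBulkVCache is used by the
 * bulk status path and passes a nonzero sequence number through to
 * afs_PostPopulateVCache, which parks it in f.m.Length while CBulkFetching
 * is set. */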
/*!
 * LOCK: afs_FlushActiveVcaches afs_xvcache N
 *
 * \param doflocks : Do we handle flocks?
 */
void
afs_FlushActiveVcaches(afs_int32 doflocks)
{
    struct afs_conn *tc;
    afs_ucred_t *cred = NULL;
    struct vrequest *treq = NULL;
    struct AFSVolSync tsync;

    AFS_STATCNT(afs_FlushActiveVcaches);

    code = afs_CreateReq(&treq, afs_osi_credp);
    if (code) {
	afs_warn("unable to alloc treq\n");
	return;
    }

    ObtainReadLock(&afs_xvcache);
    for (i = 0; i < VCSIZE; i++) {
	for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
	    if (tvc->f.states & CVInit) continue;
#ifdef AFS_DARWIN80_ENV
	    if (tvc->f.states & CDeadVnode &&
		(tvc->f.states & (CCore|CUnlinkedDel) ||
		 tvc->flockCount)) panic("Dead vnode has core/unlinkedel/flock");
#endif
	    if (doflocks && tvc->flockCount != 0) {
		struct rx_connection *rxconn;
		/* if this entry has an flock, send a keep-alive call out */
		ReleaseReadLock(&afs_xvcache);
		ObtainWriteLock(&tvc->lock, 51);

		code = afs_InitReq(treq, afs_osi_credp);
		if (code) {
		    break;	/* shutting down: do not try to extend the lock */
		}
		treq->flags |= O_NONBLOCK;

		do {
		    tc = afs_Conn(&tvc->f.fid, treq, SHARED_LOCK, &rxconn);
		    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
		    RXAFS_ExtendLock(rxconn,
				     (struct AFSFid *)&tvc->f.fid.Fid,
		} while (afs_Analyze
			 (tc, rxconn, code, &tvc->f.fid, treq,
			  AFS_STATS_FS_RPCIDX_EXTENDLOCK, SHARED_LOCK,

		ReleaseWriteLock(&tvc->lock);
#ifdef AFS_DARWIN80_ENV
		ObtainReadLock(&afs_xvcache);
#else
		ObtainReadLock(&afs_xvcache);
#endif
	    }
	    if ((tvc->f.states & CCore) || (tvc->f.states & CUnlinkedDel)) {
		/*
		 * Don't let it evaporate in case someone else is in
		 * this code.  Also, drop the afs_xvcache lock while
		 * getting vcache locks.
		 */
		ReleaseReadLock(&afs_xvcache);
#if defined(AFS_SGI_ENV)
		/*
		 * That's because if we come in via the CUnlinkedDel bit state path we'll be have 0 refcnt
		 */
		osi_Assert(VREFCOUNT_GT(tvc,0));
		AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
#endif
		ObtainWriteLock(&tvc->lock, 52);
		if (tvc->f.states & CCore) {
		    tvc->f.states &= ~CCore;
		    /* XXXX Find better place-holder for cred XXXX */
		    cred = (afs_ucred_t *)tvc->linkData;
		    tvc->linkData = NULL;	/* XXX */
		    code = afs_InitReq(treq, cred);
		    afs_Trace2(afs_iclSetp, CM_TRACE_ACTCCORE,
			       ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
			       tvc->execsOrWriters);
		    if (!code) {	/* avoid store when shutting down */
			code = afs_StoreOnLastReference(tvc, treq);
		    }
		    ReleaseWriteLock(&tvc->lock);
		    hzero(tvc->flushDV);

		    if (code && code != VNOVNODE) {
			afs_StoreWarn(code, tvc->f.fid.Fid.Volume,
				      /* /dev/console */ 1);
		    }
		} else if (tvc->f.states & CUnlinkedDel) {
		    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SGI_ENV)
		    AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
#endif
		    afs_remunlink(tvc, 0);
#if defined(AFS_SGI_ENV)
		    AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
#endif
		} else {
		    /* lost (or won, perhaps) the race condition */
		    ReleaseWriteLock(&tvc->lock);
		}
#if defined(AFS_SGI_ENV)
		AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
#endif
#ifdef AFS_DARWIN80_ENV
		AFS_RELE(AFSTOV(tvc));
		/* Matches write code setting CCore flag */
		ObtainReadLock(&afs_xvcache);
#else
		ObtainReadLock(&afs_xvcache);
		AFS_RELE(AFSTOV(tvc));
		/* Matches write code setting CCore flag */
#endif
	    }
	}
    }
    ReleaseReadLock(&afs_xvcache);
    afs_DestroyReq(treq);
}
/*!
 * Make sure a cache entry is up-to-date status-wise.
 *
 * NOTE: everywhere that calls this can potentially be sped up
 *       by checking CStatd first, and avoiding doing the InitReq
 *       if this is up-to-date.
 *
 * Anymore, the only places that call this KNOW already that the
 * vcache is not up-to-date, so we don't screw around.
 *
 * \param avc Pointer to vcache entry to verify.
 *
 * \return 0 for success or other error codes.
 */
int
afs_VerifyVCache2(struct vcache *avc, struct vrequest *areq)
{
    AFS_STATCNT(afs_VerifyVCache);

    /* otherwise we must fetch the status info */
    ObtainWriteLock(&avc->lock, 53);
    if (avc->f.states & CStatd) {
	ReleaseWriteLock(&avc->lock);
	return 0;
    }
    afs_StaleVCacheFlags(avc, AFS_STALEVC_FILENAME | AFS_STALEVC_CLEARCB,

    ReleaseWriteLock(&avc->lock);

    /* fetch the status info */
    tvc = afs_GetVCache(&avc->f.fid, areq, NULL, avc);

    /* Put it back; caller has already incremented vrefCount */
}				/*afs_VerifyVCache */
/*!
 * Simple copy of stat info into cache.
 *
 * Callers: as of 1992-04-29, only called by WriteVCache
 *
 * \param avc Ptr to vcache entry involved.
 * \param astat Ptr to stat info to copy.
 */
static void
afs_SimpleVStat(struct vcache *avc,
		struct AFSFetchStatus *astat, struct vrequest *areq)
{
    AFS_STATCNT(afs_SimpleVStat);

#ifdef AFS_64BIT_CLIENT
    FillInt64(length, astat->Length_hi, astat->Length);
#else /* AFS_64BIT_CLIENT */
    length = astat->Length;
#endif /* AFS_64BIT_CLIENT */

#if defined(AFS_SGI_ENV)
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
	&& !AFS_VN_MAPPED((vnode_t *) avc)) {
	osi_Assert((valusema(&avc->vc_rwlock) <= 0)
		   && (OSI_GET_LOCKID() == avc->vc_rwlockid));
	if (length < avc->f.m.Length) {
	    vnode_t *vp = (vnode_t *) avc;

	    osi_Assert(WriteLocked(&avc->lock));
	    ReleaseWriteLock(&avc->lock);
	    PTOSSVP(vp, (off_t) length, (off_t) MAXLONG);
	    ObtainWriteLock(&avc->lock, 67);
	}
    }

    if (!afs_DirtyPages(avc)) {
	/* if actively writing the file, don't fetch over this value */
	afs_Trace3(afs_iclSetp, CM_TRACE_SIMPLEVSTAT, ICL_TYPE_POINTER, avc,
		   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
		   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
	avc->f.m.Length = length;
	avc->f.m.Date = astat->ClientModTime;
    }
    avc->f.m.Owner = astat->Owner;
    avc->f.m.Group = astat->Group;
    avc->f.m.Mode = astat->UnixModeBits;
    if (vType(avc) == VREG) {
	avc->f.m.Mode |= S_IFREG;
    } else if (vType(avc) == VDIR) {
	avc->f.m.Mode |= S_IFDIR;
    } else if (vType(avc) == VLNK) {
	avc->f.m.Mode |= S_IFLNK;
	if ((avc->f.m.Mode & 0111) == 0)
	    avc->mvstat = AFS_MVSTAT_MTPT;
    }
    if (avc->f.states & CForeign) {
	struct axscache *ac;
	avc->f.anyAccess = astat->AnonymousAccess;
#ifdef badidea
	if ((astat->CallerAccess & ~astat->AnonymousAccess))
	    /*
	     * Caller has at least one bit not covered by anonymous, and
	     * thus may have interesting rights.
	     *
	     * HOWEVER, this is a really bad idea, because any access query
	     * for bits which aren't covered by anonymous, on behalf of a user
	     * who doesn't have any special rights, will result in an answer of
	     * the form "I don't know, lets make a FetchStatus RPC and find out!"
	     * It's an especially bad idea under Ultrix, since (due to the lack of
	     * a proper access() call) it must perform several afs_access() calls
	     * in order to create magic mode bits that vary according to who makes
	     * the call.  In other words, _every_ stat() generates a test for
	     * ...
	     */
#endif /* badidea */
	if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
	    ac->axess = astat->CallerAccess;
	else			/* not found, add a new one if possible */
	    afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
    }
}				/*afs_SimpleVStat */
/*!
 * Store the status info *only* back to the server for a
 * given vcache entry.
 *
 * Environment: Must be called with a shared lock held on the vnode.
 *
 * \param avc Ptr to the vcache entry.
 * \param astatus Ptr to the status info to store.
 * \param areq Ptr to the associated vrequest.
 *
 * \return Operation status.
 */
int
afs_WriteVCache(struct vcache *avc,
		struct AFSStoreStatus *astatus,
		struct vrequest *areq)
{
    struct afs_conn *tc;
    struct AFSFetchStatus OutStatus;
    struct AFSVolSync tsync;
    struct rx_connection *rxconn;

    AFS_STATCNT(afs_WriteVCache);
    afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
    do {
	tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
	    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
	    RXAFS_StoreStatus(rxconn, (struct AFSFid *)&avc->f.fid.Fid,
			      astatus, &OutStatus, &tsync);
    } while (afs_Analyze
	     (tc, rxconn, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_STORESTATUS,
	      SHARED_LOCK, NULL));

    UpgradeSToWLock(&avc->lock, 20);
    if (code == 0) {
	/* success, do the changes locally */
	afs_SimpleVStat(avc, &OutStatus, areq);
	/*
	 * Update the date, too.  SimpleVStat didn't do this, since
	 * it thought we were doing this after fetching new status
	 * over a file being written.
	 */
	avc->f.m.Date = OutStatus.ClientModTime;
    } else {
	/* failure, set up to check with server next time */
	afs_StaleVCacheFlags(avc, 0, CUnique);
    }
    ConvertWToSLock(&avc->lock);

    return code;
}				/*afs_WriteVCache */
/*!
 * Store status info only locally, set the proper disconnection flags
 * and add to dirty list.
 *
 * \param avc The vcache to be written locally.
 * \param astatus Get attr fields from local store.
 * \param attrs This one is only of the vs_size.
 *
 * \note Must be called with a shared lock on the vnode
 */
int
afs_WriteVCacheDiscon(struct vcache *avc,
		      struct AFSStoreStatus *astatus,
		      struct vattr *attrs)
{
    afs_int32 flags = 0;

    UpgradeSToWLock(&avc->lock, 700);

    if (!astatus->Mask) {

    } else {
	/* Set attributes. */
	if (astatus->Mask & AFS_SETMODTIME) {
	    avc->f.m.Date = astatus->ClientModTime;
	    flags |= VDisconSetTime;
	}

	if (astatus->Mask & AFS_SETOWNER) {
	    /* printf("Not allowed yet. \n"); */
	    /*avc->f.m.Owner = astatus->Owner;*/
	}

	if (astatus->Mask & AFS_SETGROUP) {
	    /* printf("Not allowed yet. \n"); */
	    /*avc->f.m.Group = astatus->Group;*/
	}

	if (astatus->Mask & AFS_SETMODE) {
	    avc->f.m.Mode = astatus->UnixModeBits;

#if 0	/* XXX: Leaving this out, so it doesn't mess up the file type flag.*/
	    if (vType(avc) == VREG) {
		avc->f.m.Mode |= S_IFREG;
	    } else if (vType(avc) == VDIR) {
		avc->f.m.Mode |= S_IFDIR;
	    } else if (vType(avc) == VLNK) {
		avc->f.m.Mode |= S_IFLNK;
		if ((avc->f.m.Mode & 0111) == 0)
		    avc->mvstat = AFS_MVSTAT_MTPT;
	    }
#endif
	    flags |= VDisconSetMode;
	}			/* if(astatus.Mask & AFS_SETMODE) */
    }				/* if (!astatus->Mask) */

    if (attrs->va_size > 0) {
	/* XXX: Do I need more checks? */
	/* Truncation operation. */
	flags |= VDisconTrunc;
    }

    afs_DisconAddDirty(avc, flags, 1);
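    /* The accumulated VDiscon* flags record which attribute classes changed
     * while disconnected, so the resynchronisation code knows what to replay
     * against the fileserver once connectivity returns. */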
    /* XXX: How about the rest of the fields? */

    ConvertWToSLock(&avc->lock);
}
/*!
 * Copy astat block into vcache info
 *
 * \note This code may get dataversion and length out of sync if the file has
 * been modified.  This is less than ideal.  I haven't thought about it sufficiently
 * to be certain that it is adequate.
 *
 * \note Environment: Must be called under a write lock
 *
 * \param avc Ptr to vcache entry.
 * \param astat Ptr to stat block to copy in.
 * \param areq Ptr to associated request.
 */
void
afs_ProcessFS(struct vcache *avc,
	      struct AFSFetchStatus *astat, struct vrequest *areq)
{
    AFS_STATCNT(afs_ProcessFS);

#ifdef AFS_64BIT_CLIENT
    FillInt64(length, astat->Length_hi, astat->Length);
#else /* AFS_64BIT_CLIENT */
    length = astat->Length;
#endif /* AFS_64BIT_CLIENT */

    /* WARNING: afs_DoBulkStat uses the Length field to store a sequence
     * number for each bulk status request. Under no circumstances
     * should afs_DoBulkStat store a sequence number if the new
     * length will be ignored when afs_ProcessFS is called with
     * new stats. If you change the following conditional then you
     * also need to change the conditional in afs_DoBulkStat. */
#ifdef AFS_SGI_ENV
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
	&& !AFS_VN_MAPPED((vnode_t *) avc)) {
#else
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
#endif
	/* if we're writing or mapping this file, don't fetch over these
	 * values. */
	afs_Trace3(afs_iclSetp, CM_TRACE_PROCESSFS, ICL_TYPE_POINTER, avc,
		   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
		   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
	avc->f.m.Length = length;
	avc->f.m.Date = astat->ClientModTime;
    }
    hset64(newDV, astat->dataVersionHigh, astat->DataVersion);
    afs_SetDataVersion(avc, &newDV);
    avc->f.m.Owner = astat->Owner;
    avc->f.m.Mode = astat->UnixModeBits;
    avc->f.m.Group = astat->Group;
    avc->f.m.LinkCount = astat->LinkCount;
    if (astat->FileType == File) {
	vSetType(avc, VREG);
	avc->f.m.Mode |= S_IFREG;
    } else if (astat->FileType == Directory) {
	vSetType(avc, VDIR);
	avc->f.m.Mode |= S_IFDIR;
    } else if (astat->FileType == SymbolicLink) {
	if (afs_fakestat_enable && (avc->f.m.Mode & 0111) == 0) {
	    vSetType(avc, VDIR);
	    avc->f.m.Mode |= S_IFDIR;
	} else {
	    vSetType(avc, VLNK);
	    avc->f.m.Mode |= S_IFLNK;
	}
	if ((avc->f.m.Mode & 0111) == 0) {
	    avc->mvstat = AFS_MVSTAT_MTPT;
	}
    }
    avc->f.anyAccess = astat->AnonymousAccess;
#ifdef badidea
    if ((astat->CallerAccess & ~astat->AnonymousAccess))
	/*
	 * Caller has at least one bit not covered by anonymous, and
	 * thus may have interesting rights.
	 *
	 * HOWEVER, this is a really bad idea, because any access query
	 * for bits which aren't covered by anonymous, on behalf of a user
	 * who doesn't have any special rights, will result in an answer of
	 * the form "I don't know, lets make a FetchStatus RPC and find out!"
	 * It's an especially bad idea under Ultrix, since (due to the lack of
	 * a proper access() call) it must perform several afs_access() calls
	 * in order to create magic mode bits that vary according to who makes
	 * the call.  In other words, _every_ stat() generates a test for
	 * ...
	 */
#endif /* badidea */
    {
	struct axscache *ac;
	if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
	    ac->axess = astat->CallerAccess;
	else			/* not found, add a new one if possible */
	    afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
    }
}				/*afs_ProcessFS */
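/* A symbolic link whose mode has no execute bits is how AFS represents a
 * mount point: afs_ProcessFS flags it AFS_MVSTAT_MTPT, and with
 * afs_fakestat_enable set it is presented to the VFS as a directory rather
 * than a link. */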
/*!
 * Get fid from server.
 *
 * \param areq Request to be passed on.
 * \param name Name of ?? to lookup.
 * \param OutStatus Fetch status.
 *
 * \return Success status of operation.
 */
int
afs_RemoteLookup(struct VenusFid *afid, struct vrequest *areq,
		 char *name, struct VenusFid *nfid,
		 struct AFSFetchStatus *OutStatusp,
		 struct AFSCallBack *CallBackp, struct server **serverp,
		 struct AFSVolSync *tsyncp)
{
    struct afs_conn *tc;
    struct rx_connection *rxconn;
    struct AFSFetchStatus OutDirStatus;

	name = "";		/* XXX */
    do {
	tc = afs_Conn(afid, areq, SHARED_LOCK, &rxconn);
	    *serverp = tc->parent->srvr->server;
	    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
	    RXAFS_Lookup(rxconn, (struct AFSFid *)&afid->Fid, name,
			 (struct AFSFid *)&nfid->Fid, OutStatusp,
			 &OutDirStatus, CallBackp, tsyncp);
    } while (afs_Analyze
	     (tc, rxconn, code, afid, areq, AFS_STATS_FS_RPCIDX_XLOOKUP, SHARED_LOCK,
/*!
 * Given a file id and a vrequest structure, fetch the status
 * information associated with the file.
 *
 * \param afid File ID.
 * \param areq Ptr to associated vrequest structure, specifying the
 *	user whose authentication tokens will be used.
 * \param avc Caller may already have a vcache for this file, which is
 *	...
 *
 * \note Environment:
 *	The cache entry is returned with an increased vrefCount field.
 *	The entry must be discarded by calling afs_PutVCache when you
 *	are through using the pointer to the cache entry.
 *
 *	You should not hold any locks when calling this function, except
 *	locks on other vcache entries.  If you lock more than one vcache
 *	entry simultaneously, you should lock them in this order:
 *
 *	    1. Lock all files first, then directories.
 *	    2. Within a particular type, lock entries in Fid.Vnode order.
 *
 *	This locking hierarchy is convenient because it allows locking
 *	of a parent dir cache entry, given a file (to check its access
 *	control list).  It also allows renames to be handled easily by
 *	locking directories in a constant order.
 *
 * \note NB. NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
 *
 * \note Might have a vcache structure already, which must
 *	already be held by the caller
 */
struct vcache *
afs_GetVCache(struct VenusFid *afid, struct vrequest *areq,
	      afs_int32 * cached, struct vcache *avc)
{
    afs_int32 code, newvcache = 0;

    AFS_STATCNT(afs_GetVCache);

    *cached = 0;		/* Init just in case */

#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)

    ObtainSharedLock(&afs_xvcache, 5);

    tvc = afs_FindVCache(afid, &retry, DO_STATS | DO_VLRU | IS_SLOCK);

#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
    ReleaseSharedLock(&afs_xvcache);
    spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
#endif

    osi_Assert((tvc->f.states & CVInit) == 0);
    /* If we are in readdir, return the vnode even if not statd */
    if ((tvc->f.states & CStatd) || afs_InReadDir(tvc)) {
	ReleaseSharedLock(&afs_xvcache);
    }

    UpgradeSToWLock(&afs_xvcache, 21);

    /* no cache entry, better grab one */
    tvc = afs_NewVCache(afid, NULL);

    ConvertWToSLock(&afs_xvcache);

    ReleaseSharedLock(&afs_xvcache);

    afs_stats_cmperf.vcacheMisses++;

    ReleaseSharedLock(&afs_xvcache);

    ObtainWriteLock(&tvc->lock, 54);

    if (tvc->f.states & CStatd) {
	ReleaseWriteLock(&tvc->lock);
    }

#ifdef AFS_DARWIN80_ENV
    /* Darwin 8.0 only has bufs in nfs, so we shouldn't have to worry about them.
     * ... */
#endif
#if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
    /*
     * XXX - I really don't like this.  Should try to understand better.
     * It seems that sometimes, when we get called, we already hold the
     * lock on the vnode (e.g., from afs_getattr via afs_VerifyVCache).
     * We can't drop the vnode lock, because that could result in a race.
     * Sometimes, though, we get here and don't hold the vnode lock.
     * I hate code paths that sometimes hold locks and sometimes don't.
     * In any event, the dodge we use here is to check whether the vnode
     * is locked, and if it isn't, then we gain and drop it around the call
     * to vinvalbuf; otherwise, we leave it alone.
     */
    {
	struct vnode *vp = AFSTOV(tvc);
	int iheldthelock;

#if defined(AFS_DARWIN_ENV)
	iheldthelock = VOP_ISLOCKED(vp);
	if (!iheldthelock)
	    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, current_proc());
	/* this is messy. we can call fsync which will try to reobtain this */
	if (VTOAFS(vp) == tvc)
	    ReleaseWriteLock(&tvc->lock);
	if (UBCINFOEXISTS(vp)) {
	    vinvalbuf(vp, V_SAVE, &afs_osi_cred, current_proc(), PINOD, 0);
	}
	if (VTOAFS(vp) == tvc)
	    ObtainWriteLock(&tvc->lock, 954);
	if (!iheldthelock)
	    VOP_UNLOCK(vp, LK_EXCLUSIVE, current_proc());
#elif defined(AFS_FBSD80_ENV)
	iheldthelock = VOP_ISLOCKED(vp);
	if (!iheldthelock) {
	    /* nosleep/sleep lock order reversal */
	    int glocked = ISAFS_GLOCK();
	    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}
	vinvalbuf(vp, V_SAVE, PINOD, 0);	/* changed late in 8.0-CURRENT */
#elif defined(AFS_FBSD60_ENV)
	iheldthelock = VOP_ISLOCKED(vp, curthread);
	if (!iheldthelock)
	    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
	vinvalbuf(vp, V_SAVE, curthread, PINOD, 0);
	if (!iheldthelock)
	    VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
#elif defined(AFS_FBSD_ENV)
	iheldthelock = VOP_ISLOCKED(vp, curthread);
	if (!iheldthelock)
	    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
	vinvalbuf(vp, V_SAVE, osi_curcred(), curthread, PINOD, 0);
	if (!iheldthelock)
	    VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
#elif defined(AFS_OBSD_ENV)
	iheldthelock = VOP_ISLOCKED(vp, curproc);
	if (!iheldthelock)
	    VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
	uvm_vnp_uncache(vp);
	if (!iheldthelock)
	    VOP_UNLOCK(vp, 0, curproc);
#elif defined(AFS_NBSD40_ENV)
	iheldthelock = VOP_ISLOCKED(vp);
	if (!iheldthelock) {
	    VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
	}
	uvm_vnp_uncache(vp);
#endif
    }
#endif

    afs_StaleVCacheFlags(tvc, AFS_STALEVC_NODNLC | AFS_STALEVC_CLEARCB,

    /* It is always appropriate to throw away all the access rights? */
    afs_FreeAllAxs(&(tvc->Access));
    tvp = afs_GetVolume(afid, areq, READ_LOCK);	/* copy useful per-volume info */
    if ((tvp->states & VForeign)) {
	tvc->f.states |= CForeign;
	if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
	    && (tvp->rootUnique == afid->Fid.Unique)) {
	    tvc->mvstat = AFS_MVSTAT_ROOT;
	}
    }
    if (tvp->states & VRO)
	tvc->f.states |= CRO;
    if (tvp->states & VBackup)
	tvc->f.states |= CBackup;
    /* now copy ".." entry back out of volume structure, if necessary */
    if (tvc->mvstat == AFS_MVSTAT_ROOT && tvp->dotdot.Fid.Volume != 0) {
	if (!tvc->mvid.parent)
	    tvc->mvid.parent = (struct VenusFid *)
		osi_AllocSmallSpace(sizeof(struct VenusFid));
	*tvc->mvid.parent = tvp->dotdot;
    }
    afs_PutVolume(tvp, READ_LOCK);

    afs_RemoveVCB(afid);

    {
	struct AFSFetchStatus OutStatus;

	if (afs_DynrootNewVnode(tvc, &OutStatus)) {
	    afs_ProcessFS(tvc, &OutStatus, areq);
	    tvc->f.states |= CStatd | CUnique;
	    tvc->f.parent.vnode = OutStatus.ParentVnode;
	    tvc->f.parent.unique = OutStatus.ParentUnique;
	} else if (AFS_IS_DISCONNECTED) {
	    /* Nothing to do otherwise...*/
	    /* printf("Network is down in afs_GetCache"); */
	} else {
	    code = afs_FetchStatus(tvc, afid, areq, &OutStatus);

	    /* For the NFS translator's benefit, make sure
	     * non-directory vnodes always have their parent FID set
	     * correctly, even when created as a result of decoding an
	     * NFS filehandle.  It would be nice to also do this for
	     * directories, but we can't because the fileserver fills
	     * in the FID of the directory itself instead of that of
	     * ...
	     */
	    if (!code && OutStatus.FileType != Directory &&
		!tvc->f.parent.vnode) {
		tvc->f.parent.vnode = OutStatus.ParentVnode;
		tvc->f.parent.unique = OutStatus.ParentUnique;
		/* XXX - SXW - It's conceivable we should mark ourselves
		 *             as dirty again here, incase we've been raced
		 *             out of the FetchStatus call.
		 */
	    }
	}
    }

	ReleaseWriteLock(&tvc->lock);

    ReleaseWriteLock(&tvc->lock);
    return tvc;
}				/*afs_GetVCache */
/*!
 * Lookup a vcache by fid. Look inside the cache first, if not
 * there, lookup the file on the server, and then get its fresh
 * status.
 *
 * \param cached Is element cached? If NULL, don't answer.
 *
 * \return The found element or NULL.
 */
struct vcache *
afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
		 afs_int32 * cached, struct vcache *adp, char *aname)
{
    afs_int32 code, now, newvcache = 0;
    struct VenusFid nfid;
    struct AFSFetchStatus OutStatus;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    struct server *serverp = 0;

    AFS_STATCNT(afs_GetVCache);

    *cached = 0;		/* Init just in case */

#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)

    ObtainReadLock(&afs_xvcache);
    tvc = afs_FindVCache(afid, &retry, DO_STATS /* no vlru */ );

    ReleaseReadLock(&afs_xvcache);

#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
    spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
#endif

    ObtainReadLock(&tvc->lock);

    if (tvc->f.states & CStatd) {
	ReleaseReadLock(&tvc->lock);
	return tvc;
    }
    tvc->f.states &= ~CUnique;

    ReleaseReadLock(&tvc->lock);

    ObtainReadLock(&afs_xvcache);
    ReleaseReadLock(&afs_xvcache);

    /* lookup the file */
    origCBs = afs_allCBs;	/* if anything changes, we don't have a cb */

    if (AFS_IS_DISCONNECTED) {
	/* printf("Network is down in afs_LookupVcache\n"); */
    } else
	afs_RemoteLookup(&adp->f.fid, areq, aname, &nfid, &OutStatus,
			 &CallBack, &serverp, &tsync);

#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)

    ObtainSharedLock(&afs_xvcache, 6);
    tvc = afs_FindVCache(&nfid, &retry, DO_VLRU | IS_SLOCK /* no xstats now */ );

#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
    ReleaseSharedLock(&afs_xvcache);
    spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
#endif

    /* no cache entry, better grab one */
    UpgradeSToWLock(&afs_xvcache, 22);
    tvc = afs_NewVCache(&nfid, serverp);

    ConvertWToSLock(&afs_xvcache);

    ReleaseSharedLock(&afs_xvcache);

    ReleaseSharedLock(&afs_xvcache);
    ObtainWriteLock(&tvc->lock, 55);

    /* It is always appropriate to throw away all the access rights? */
    afs_FreeAllAxs(&(tvc->Access));
    tvp = afs_GetVolume(afid, areq, READ_LOCK);	/* copy useful per-vol info */
    if ((tvp->states & VForeign)) {
	tvc->f.states |= CForeign;
	if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
	    && (tvp->rootUnique == afid->Fid.Unique))
	    tvc->mvstat = AFS_MVSTAT_ROOT;
    }
    if (tvp->states & VRO)
	tvc->f.states |= CRO;
    if (tvp->states & VBackup)
	tvc->f.states |= CBackup;
    /* now copy ".." entry back out of volume structure, if necessary */
    if (tvc->mvstat == AFS_MVSTAT_ROOT && tvp->dotdot.Fid.Volume != 0) {
	if (!tvc->mvid.parent)
	    tvc->mvid.parent = (struct VenusFid *)
		osi_AllocSmallSpace(sizeof(struct VenusFid));
	*tvc->mvid.parent = tvp->dotdot;
    }

	afs_StaleVCacheFlags(tvc, 0, CUnique);

	afs_PutVolume(tvp, READ_LOCK);
	ReleaseWriteLock(&tvc->lock);

    ObtainWriteLock(&afs_xcbhash, 466);
    if (origCBs == afs_allCBs) {
	if (CallBack.ExpirationTime) {
	    tvc->callback = serverp;
	    tvc->cbExpires = CallBack.ExpirationTime + now;
	    tvc->f.states |= CStatd | CUnique;
	    tvc->f.states &= ~CBulkFetching;
	    afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvp);
	} else if (tvc->f.states & CRO) {
	    /* adapt gives us an hour. */
	    tvc->cbExpires = 3600 + osi_Time();
	    /*XXX*/ tvc->f.states |= CStatd | CUnique;
	    tvc->f.states &= ~CBulkFetching;
	    afs_QueueCallback(tvc, CBHash(3600), tvp);
	} else {
	    afs_StaleVCacheFlags(tvc,
				 AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
	}
    } else {
	afs_StaleVCacheFlags(tvc,
			     AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
    }
    ReleaseWriteLock(&afs_xcbhash);

    afs_PutVolume(tvp, READ_LOCK);
    afs_ProcessFS(tvc, &OutStatus, areq);

    ReleaseWriteLock(&tvc->lock);
    return tvc;
}
struct vcache *
afs_GetRootVCache(struct VenusFid *afid, struct vrequest *areq,
		  afs_int32 * cached, struct volume *tvolp)
{
    afs_int32 code = 0, i, newvcache = 0, haveStatus = 0;
    afs_int32 getNewFid = 0;
    struct VenusFid nfid;
    struct server *serverp = 0;
    struct AFSFetchStatus OutStatus;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;

#ifdef AFS_DARWIN80_ENV
#endif

    if (!tvolp->rootVnode || getNewFid) {
	struct VenusFid tfid;

	tfid.Fid.Vnode = 0;	/* Means get rootfid of volume */
	origCBs = afs_allCBs;	/* ignore InitCallBackState */

	afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,

	/* ReleaseReadLock(&tvolp->lock); */
	ObtainWriteLock(&tvolp->lock, 56);
	tvolp->rootVnode = afid->Fid.Vnode = nfid.Fid.Vnode;
	tvolp->rootUnique = afid->Fid.Unique = nfid.Fid.Unique;
	ReleaseWriteLock(&tvolp->lock);
	/* ObtainReadLock(&tvolp->lock);*/
    } else {
	afid->Fid.Vnode = tvolp->rootVnode;
	afid->Fid.Unique = tvolp->rootUnique;
    }

    ObtainSharedLock(&afs_xvcache, 7);
    for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
	if (!FidCmp(&(tvc->f.fid), afid)) {
	    if (tvc->f.states & CVInit) {
		ReleaseSharedLock(&afs_xvcache);
		afs_osi_Sleep(&tvc->f.states);
	    }
#ifdef AFS_DARWIN80_ENV
	    if (tvc->f.states & CDeadVnode) {
		ReleaseSharedLock(&afs_xvcache);
		afs_osi_Sleep(&tvc->f.states);
	    }
	    if (vnode_get(tvp))	/* this bumps ref count */
	    if (vnode_ref(tvp)) {
		/* AFSTOV(tvc) may be NULL */
	    }
#endif
	}
    }

    if (!haveStatus && (!tvc || !(tvc->f.states & CStatd))) {
	/* Mount point no longer stat'd or unknown. FID may have changed. */
	ReleaseSharedLock(&afs_xvcache);
#ifdef AFS_DARWIN80_ENV
	vnode_put(AFSTOV(tvc));
	vnode_rele(AFSTOV(tvc));
#endif
    }

    UpgradeSToWLock(&afs_xvcache, 23);
    /* no cache entry, better grab one */
    tvc = afs_NewVCache(afid, NULL);

    ReleaseWriteLock(&afs_xvcache);

    afs_stats_cmperf.vcacheMisses++;

    afs_stats_cmperf.vcacheHits++;
#if defined(AFS_DARWIN80_ENV)
    /* we already bumped the ref count in the for loop above */
#else /* AFS_DARWIN80_ENV */

    UpgradeSToWLock(&afs_xvcache, 24);
    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
	refpanic("GRVC VLRU inconsistent0");
    }
    if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
	refpanic("GRVC VLRU inconsistent1");
    }
    if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
	refpanic("GRVC VLRU inconsistent2");
    }
    QRemove(&tvc->vlruq);	/* move to lruq head */
    QAdd(&VLRU, &tvc->vlruq);
    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
	refpanic("GRVC VLRU inconsistent3");
    }
    if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
	refpanic("GRVC VLRU inconsistent4");
    }
    if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
	refpanic("GRVC VLRU inconsistent5");
    }

    ReleaseWriteLock(&afs_xvcache);

    if (tvc->f.states & CStatd) {
	return tvc;
    }

    ObtainReadLock(&tvc->lock);
    tvc->f.states &= ~CUnique;
    tvc->callback = NULL;	/* redundant, perhaps */
    ReleaseReadLock(&tvc->lock);

    ObtainWriteLock(&tvc->lock, 57);

    /* It is always appropriate to throw away all the access rights? */
    afs_FreeAllAxs(&(tvc->Access));

    tvc->f.states |= CForeign;
    if (tvolp->states & VRO)
	tvc->f.states |= CRO;
    if (tvolp->states & VBackup)
	tvc->f.states |= CBackup;
    /* now copy ".." entry back out of volume structure, if necessary */
    if (newvcache && (tvolp->rootVnode == afid->Fid.Vnode)
	&& (tvolp->rootUnique == afid->Fid.Unique)) {
	tvc->mvstat = AFS_MVSTAT_ROOT;
    }
    if (tvc->mvstat == AFS_MVSTAT_ROOT && tvolp->dotdot.Fid.Volume != 0) {
	if (!tvc->mvid.parent)
	    tvc->mvid.parent = (struct VenusFid *)
		osi_AllocSmallSpace(sizeof(struct VenusFid));
	*tvc->mvid.parent = tvolp->dotdot;
    }

    afs_RemoveVCB(afid);

    {
	struct VenusFid tfid;

	tfid.Fid.Vnode = 0;	/* Means get rootfid of volume */
	origCBs = afs_allCBs;	/* ignore InitCallBackState */

	afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,

	afs_StaleVCacheFlags(tvc, AFS_STALEVC_CLEARCB, CUnique);
	ReleaseWriteLock(&tvc->lock);
    }

    ObtainWriteLock(&afs_xcbhash, 468);
    if (origCBs == afs_allCBs) {
	tvc->f.states |= CTruth;
	tvc->callback = serverp;
	if (CallBack.ExpirationTime != 0) {
	    tvc->cbExpires = CallBack.ExpirationTime + start;
	    tvc->f.states |= CStatd;
	    tvc->f.states &= ~CBulkFetching;
	    afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvolp);
	} else if (tvc->f.states & CRO) {
	    /* adapt gives us an hour. */
	    tvc->cbExpires = 3600 + osi_Time();
	    /*XXX*/ tvc->f.states |= CStatd;
	    tvc->f.states &= ~CBulkFetching;
	    afs_QueueCallback(tvc, CBHash(3600), tvolp);
	}
    } else {
	afs_StaleVCacheFlags(tvc, AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
    }
    ReleaseWriteLock(&afs_xcbhash);
    afs_ProcessFS(tvc, &OutStatus, areq);

    ReleaseWriteLock(&tvc->lock);
    return tvc;
}
/*!
 * Update callback status and (sometimes) attributes of a vnode.
 * Called after doing a fetch status RPC. Whilst disconnected, attributes
 * shouldn't be written to the vcache here.
 *
 * \param Outsp Server status after rpc call.
 * \param acb Callback for this vnode.
 *
 * \note The vcache must be write locked.
 */
void
afs_UpdateStatus(struct vcache *avc, struct VenusFid *afid,
		 struct vrequest *areq, struct AFSFetchStatus *Outsp,
		 struct AFSCallBack *acb, afs_uint32 start)
{
    struct volume *volp;

    if (!AFS_IS_DISCONNECTED || AFS_IN_SYNC)
	/* Dont write status in vcache if resyncing after a disconnection. */
	afs_ProcessFS(avc, Outsp, areq);

    volp = afs_GetVolume(afid, areq, READ_LOCK);
    ObtainWriteLock(&afs_xcbhash, 469);
    avc->f.states |= CTruth;
    if (avc->callback /* check for race */ ) {
	if (acb->ExpirationTime != 0) {
	    avc->cbExpires = acb->ExpirationTime + start;
	    avc->f.states |= CStatd;
	    avc->f.states &= ~CBulkFetching;
	    afs_QueueCallback(avc, CBHash(acb->ExpirationTime), volp);
	} else if (avc->f.states & CRO) {
	    /* ordinary callback on a read-only volume -- AFS 3.2 style */
	    avc->cbExpires = 3600 + start;
	    avc->f.states |= CStatd;
	    avc->f.states &= ~CBulkFetching;
	    afs_QueueCallback(avc, CBHash(3600), volp);
	} else {
	    afs_StaleVCacheFlags(avc,
				 AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
				 CUnique);
	}
    } else {
	afs_StaleVCacheFlags(avc, AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
			     CUnique);
    }
    ReleaseWriteLock(&afs_xcbhash);
    if (volp)
	afs_PutVolume(volp, READ_LOCK);
}
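
/*
 * The callback expiry stored above is kept as an absolute local time: the
 * server returns ExpirationTime as a lifetime in seconds, and afs_UpdateStatus
 * adds it to 'start', the local clock value sampled when the FetchStatus RPC
 * was issued.  Using the RPC start time rather than "now" keeps the
 * client-side expiry conservative: a 300-second callback granted on an RPC
 * started at local time T yields cbExpires = T + 300 even if the reply
 * arrives a few seconds later.
 */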
static void
afs_BadFetchStatus(struct afs_conn *tc)
{
    int addr = ntohl(tc->parent->srvr->sa_ip);
    afs_warn("afs: Invalid AFSFetchStatus from server %u.%u.%u.%u\n",
	     (addr >> 24) & 0xff, (addr >> 16) & 0xff, (addr >> 8) & 0xff,
	     (addr) & 0xff);
    afs_warn("afs: This suggests the server may be sending bad data that "
	     "can lead to availability issues or data corruption. The "
	     "issue has been avoided for now, but it may not always be "
	     "detectable. Please upgrade the server if possible.\n");
}

/**
 * Check if a given AFSFetchStatus structure is sane.
 *
 * @param[in] tc The server from which we received the status
 * @param[in] status The status we received
 *
 * @return whether the given structure is valid or not
 * @retval 0 the structure is fine
 * @retval nonzero the structure looks like garbage; act as if we received
 *                 the returned error code from the server
 */
int
afs_CheckFetchStatus(struct afs_conn *tc, struct AFSFetchStatus *status)
{
    if (status->errorCode ||
	status->InterfaceVersion != 1 ||
	!(status->FileType > Invalid && status->FileType <= SymbolicLink) ||
	status->ParentVnode == 0 || status->ParentUnique == 0) {

	afs_warn("afs: FetchStatus ec %u iv %u ft %u pv %u pu %u\n",
		 (unsigned)status->errorCode,
		 (unsigned)status->InterfaceVersion,
		 (unsigned)status->FileType, (unsigned)status->ParentVnode,
		 (unsigned)status->ParentUnique);
	afs_BadFetchStatus(tc);

	return VBUSY;
    }
    return 0;
}
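
/*
 * Callers (for example afs_FetchStatus below) run afs_CheckFetchStatus on
 * every otherwise-successful FetchStatus reply.  The checks are deliberately
 * cheap, structural ones: the wire InterfaceVersion must be 1, the FileType
 * must be one of the defined vnode types, and the parent vnode/unique fields
 * must be non-zero.  Anything else is treated like an error returned by the
 * fileserver, so the suspect data never reaches the vcache.
 */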
/*!
 * Must be called with avc write-locked
 * don't absolutely have to invalidate the hint unless the dv has
 * changed, but be sure to get it right else there will be consistency bugs.
 */
int
afs_FetchStatus(struct vcache *avc, struct VenusFid *afid,
		struct vrequest *areq, struct AFSFetchStatus *Outsp)
{
    int code;
    afs_uint32 start = 0;
    struct afs_conn *tc;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    struct rx_connection *rxconn;
    XSTATS_DECLS;

    do {
	tc = afs_Conn(afid, areq, SHARED_LOCK, &rxconn);
	avc->dchint = NULL;	/* invalidate hints */
	if (tc) {
	    avc->callback = tc->parent->srvr->server;
	    start = osi_Time();
	    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
	    RX_AFS_GUNLOCK();
	    code =
		RXAFS_FetchStatus(rxconn, (struct AFSFid *)&afid->Fid, Outsp,
				  &CallBack, &tsync);
	    RX_AFS_GLOCK();

	    XSTATS_END_TIME;
	    if (code == 0) {
		code = afs_CheckFetchStatus(tc, Outsp);
	    }
	} else
	    code = -1;
    } while (afs_Analyze
	     (tc, rxconn, code, afid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
	      SHARED_LOCK, NULL));

    if (!code) {
	afs_UpdateStatus(avc, afid, areq, Outsp, &CallBack, start);
    } else {
	/* used to undo the local callback, but that's too extreme.
	 * There are plenty of good reasons that fetchstatus might return
	 * an error, such as EPERM.  If we have the vnode cached, statd,
	 * with callback, might as well keep track of the fact that we
	 * don't have access...
	 */
	if (code == EPERM || code == EACCES) {
	    struct axscache *ac;
	    if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
		ac->axess = 0;
	    else		/* not found, add a new one if possible */
		afs_AddAxs(avc->Access, areq->uid, 0);
	}
    }
    return code;
}
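
/*
 * The EPERM/EACCES handling above deliberately leaves the vcache itself
 * intact and instead records a zero rights mask for this user in the
 * per-vcache access cache (axscache).  Later permission checks for the same
 * uid can then fail locally without another round trip, while other users'
 * cached rights and the cached status are unaffected.
 */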
/*!
 * Stuff some information into the vcache for the given file.
 *
 * Environment:
 *	afid : File in question.
 *	OutStatus : Fetch status on the file.
 *	CallBack : Callback info.
 *	tc : RPC connection involved.
 *	areq : vrequest involved.
 *
 *	Nothing interesting.
 */
void
afs_StuffVcache(struct VenusFid *afid,
		struct AFSFetchStatus *OutStatus,
		struct AFSCallBack *CallBack, struct afs_conn *tc,
		struct vrequest *areq)
{
    afs_int32 code, i, newvcache = 0;
    struct vcache *tvc;
    struct AFSVolSync tsync;
    struct volume *tvp;
    struct axscache *ac;
    afs_int32 retry;

    AFS_STATCNT(afs_StuffVcache);
#ifdef IFS_VCACHECOUNT
#endif

  loop:
    ObtainSharedLock(&afs_xvcache, 8);

    tvc = afs_FindVCache(afid, &retry, DO_VLRU | IS_SLOCK /* no stats */ );
    if (tvc && retry) {
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
	ReleaseSharedLock(&afs_xvcache);
	spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
	goto loop;
#endif
    }

    if (!tvc) {
	/* no cache entry, better grab one */
	UpgradeSToWLock(&afs_xvcache, 25);
	tvc = afs_NewVCache(afid, NULL);
	newvcache = 1;
	ConvertWToSLock(&afs_xvcache);
	if (!tvc) {
	    ReleaseSharedLock(&afs_xvcache);
	    return;
	}
    }

    ReleaseSharedLock(&afs_xvcache);
    ObtainWriteLock(&tvc->lock, 58);

    afs_StaleVCacheFlags(tvc, AFS_STALEVC_NOCB, 0);

    /* Is it always appropriate to throw away all the access rights? */
    afs_FreeAllAxs(&(tvc->Access));

    /*Copy useful per-volume info */
    tvp = afs_GetVolume(afid, areq, READ_LOCK);
    if (tvp) {
	if (newvcache && (tvp->states & VForeign))
	    tvc->f.states |= CForeign;
	if (tvp->states & VRO)
	    tvc->f.states |= CRO;
	if (tvp->states & VBackup)
	    tvc->f.states |= CBackup;
	/*
	 * Now, copy ".." entry back out of volume structure, if
	 * necessary
	 */
	if (tvc->mvstat == AFS_MVSTAT_ROOT && tvp->dotdot.Fid.Volume != 0) {
	    if (!tvc->mvid.parent)
		tvc->mvid.parent = (struct VenusFid *)
		    osi_AllocSmallSpace(sizeof(struct VenusFid));
	    *tvc->mvid.parent = tvp->dotdot;
	}
    }
    /* store the stat on the file */
    afs_RemoveVCB(afid);
    afs_ProcessFS(tvc, OutStatus, areq);
    tvc->callback = tc->srvr->server;

    /* we use osi_Time twice below.  Ideally, we would use the time at which
     * the FetchStatus call began, instead, but we don't have it here.  So we
     * make do with "now".  In the CRO case, it doesn't really matter. In
     * the other case, we hope that the difference between "now" and when the
     * call actually began execution on the server won't be larger than the
     * padding which the server keeps.  Subtract 1 second anyway, to be on
     * the safe side.  Can't subtract more because we don't know how big
     * ExpirationTime is.  Possible consistency problems may arise if the call
     * timeout period becomes longer than the server's expiration padding. */
    ObtainWriteLock(&afs_xcbhash, 470);
    if (CallBack->ExpirationTime != 0) {
	tvc->cbExpires = CallBack->ExpirationTime + osi_Time() - 1;
	tvc->f.states |= CStatd;
	tvc->f.states &= ~CBulkFetching;
	afs_QueueCallback(tvc, CBHash(CallBack->ExpirationTime), tvp);
    } else if (tvc->f.states & CRO) {
	/* old-fashioned AFS 3.2 style */
	tvc->cbExpires = 3600 + osi_Time();
	 /*XXX*/ tvc->f.states |= CStatd;
	tvc->f.states &= ~CBulkFetching;
	afs_QueueCallback(tvc, CBHash(3600), tvp);
    } else {
	afs_StaleVCacheFlags(tvc, AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
			     CUnique);
    }
    ReleaseWriteLock(&afs_xcbhash);
    if (tvp)
	afs_PutVolume(tvp, READ_LOCK);

    /* look in per-pag cache */
    if (tvc->Access && (ac = afs_FindAxs(tvc->Access, areq->uid)))
	ac->axess = OutStatus->CallerAccess;	/* substitute pags */
    else			/* not found, add a new one if possible */
	afs_AddAxs(tvc->Access, areq->uid, OutStatus->CallerAccess);

    ReleaseWriteLock(&tvc->lock);
    afs_Trace4(afs_iclSetp, CM_TRACE_STUFFVCACHE, ICL_TYPE_POINTER, tvc,
	       ICL_TYPE_POINTER, tvc->callback, ICL_TYPE_INT32,
	       tvc->cbExpires, ICL_TYPE_INT32, tvc->cbExpires - osi_Time());
    /*
     * Release ref count... hope this guy stays around...
     */
    afs_PutVCache(tvc);
}				/*afs_StuffVcache */
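
/*
 * Worked example of the expiry arithmetic above: if the server grants a
 * 600-second callback and osi_Time() is 1000000 when afs_StuffVcache runs,
 * cbExpires becomes 1000599 (600 + 1000000 - 1).  The subtracted second is
 * the safety margin described in the comment; it compensates for the unknown
 * delay between the server granting the callback and this code running.
 */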
/*!
 * Decrements the reference count on a cache entry.
 *
 * \param avc Pointer to the cache entry to decrement.
 *
 * \note Environment: Nothing interesting.
 */
void
afs_PutVCache(struct vcache *avc)
{
    AFS_STATCNT(afs_PutVCache);
#ifdef AFS_DARWIN80_ENV
    vnode_put(AFSTOV(avc));
    AFS_FAST_RELE(avc);
#else
    /*
     * Can we use a read lock here?
     */
    ObtainReadLock(&afs_xvcache);
    AFS_FAST_RELE(avc);
    ReleaseReadLock(&afs_xvcache);
#endif
}				/*afs_PutVCache */
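
/*
 * afs_PutVCache pairs with the reference taken when an entry is looked up or
 * created (afs_FindVCache holds the vnode before returning it); callers drop
 * that reference here once they are done touching the entry.
 */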
/*!
 * Reset a vcache entry, so local contents are ignored, and the
 * server will be reconsulted next time the vcache is used
 *
 * \param avc Pointer to the cache entry to reset
 * \param acred
 * \param skipdnlc skip the dnlc purge for this vnode
 *
 * \note avc must be write locked on entry
 *
 * \note The caller should purge the dnlc when skipdnlc is set.
 */
void
afs_ResetVCache(struct vcache *avc, afs_ucred_t *acred, afs_int32 skipdnlc)
{
    afs_stalevc_flags_t flags = 0;
    if (skipdnlc) {
	flags |= AFS_STALEVC_NODNLC;
    }

    afs_StaleVCacheFlags(avc, flags, CDirty);	/* next reference will re-stat */
    /* now find the disk cache entries */
    afs_TryToSmush(avc, acred, 1);
    if (avc->linkData && !(avc->f.states & CCore)) {
	afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
	avc->linkData = NULL;
    }
}
/*!
 * Sleep when searching for a vcache. Releases all the pending locks,
 * sleeps then obtains the previously released locks.
 *
 * \param vcache Enter sleep state.
 * \param flag Determines what locks to use.
 *
 * \return
 */
static void
findvc_sleep(struct vcache *avc, int flag)
{
    if (flag & IS_SLOCK) {
	ReleaseSharedLock(&afs_xvcache);
    } else {
	if (flag & IS_WLOCK) {
	    ReleaseWriteLock(&afs_xvcache);
	} else {	/* read lock was held */
	    ReleaseReadLock(&afs_xvcache);
	}
    }
    afs_osi_Sleep(&avc->f.states);
    if (flag & IS_SLOCK) {
	ObtainSharedLock(&afs_xvcache, 341);
    } else {
	if (flag & IS_WLOCK) {
	    ObtainWriteLock(&afs_xvcache, 343);
	} else {	/* read lock was held */
	    ObtainReadLock(&afs_xvcache);
	}
    }
}
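
/*
 * findvc_sleep exists because afs_FindVCache can be entered with afs_xvcache
 * held in any of the three modes.  The flag argument records which mode the
 * caller holds (IS_SLOCK, IS_WLOCK, or neither for a read lock), so the lock
 * can be dropped before sleeping on the vcache's state word and re-acquired
 * in the same mode afterwards.  Callers re-scan the hash chain after the
 * sleep, since the entry may have changed or disappeared in the meantime.
 */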
/*!
 * Add a reference on an existing vcache entry.
 *
 * \param tvc Pointer to the vcache.
 *
 * \note Environment: Must be called with at least one reference from
 * elsewhere on the vcache, even if that reference will be dropped.
 * The global lock is required.
 *
 * \return 0 on success, -1 on failure.
 */
int
afs_RefVCache(struct vcache *tvc)
{
#ifdef AFS_DARWIN80_ENV
    vnode_t tvp;
#endif

    /* AFS_STATCNT(afs_RefVCache); */

#ifdef AFS_DARWIN80_ENV
    tvp = AFSTOV(tvc);
    if (vnode_get(tvp))
	return -1;
    if (vnode_ref(tvp)) {
	AFS_GUNLOCK();
	/* AFSTOV(tvc) may be NULL */
	vnode_put(tvp);
	AFS_GLOCK();
	return -1;
    }
#else
    osi_vnhold(tvc, (int *)0);
#endif
    return 0;
}				/*afs_RefVCache */
/*!
 * Find a vcache entry given a fid.
 *
 * \param afid Pointer to the fid whose cache entry we desire.
 * \param retry (SGI-specific) tell the caller to drop the lock on xvcache,
 *  unlock the vnode, and try again.
 * \param flag Bit 1 to specify whether to compute hit statistics.  Not
 *  set if FindVCache is called as part of internal bookkeeping.
 *
 * \note Environment: Must be called with the afs_xvcache lock at least held at
 * the read level.  In order to do the VLRU adjustment, the xvcache lock
 * must be shared-- we upgrade it here.
 */
struct vcache *
afs_FindVCache(struct VenusFid *afid, afs_int32 *retry, afs_int32 flag)
{
    struct vcache *tvc;
    afs_int32 i;
#ifdef AFS_DARWIN80_ENV
    struct vcache *deadvc = NULL, *livevc = NULL;
    vnode_t tvp;
#endif

    AFS_STATCNT(afs_FindVCache);

 findloop:
    i = VCHash(afid);
    for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
	if (FidMatches(afid, tvc)) {
	    if (tvc->f.states & CVInit) {
		findvc_sleep(tvc, flag);
		goto findloop;
	    }
#ifdef AFS_DARWIN80_ENV
	    if (tvc->f.states & CDeadVnode) {
		findvc_sleep(tvc, flag);
		goto findloop;
	    }
#endif
	    break;
	}
    }

    /* should I have a read lock on the vnode here? */
    if (tvc) {
#if defined(AFS_DARWIN80_ENV)
	tvp = AFSTOV(tvc);
	if (vnode_get(tvp))
	    tvp = NULL;
	if (tvp && vnode_ref(tvp)) {
	    AFS_GUNLOCK();
	    /* AFSTOV(tvc) may be NULL */
	    vnode_put(tvp);
	    AFS_GLOCK();
	    tvp = NULL;
	}
	if (!tvp) {
	    tvc = NULL;
	    return tvc;
	}
#elif defined(AFS_DARWIN_ENV)
	tvc->f.states |= CUBCinit;
	AFS_GUNLOCK();
	if (UBCINFOMISSING(AFSTOV(tvc)) ||
	    UBCINFORECLAIMED(AFSTOV(tvc))) {
	    ubc_info_init(AFSTOV(tvc));
	}
	AFS_GLOCK();
	tvc->f.states &= ~CUBCinit;
#else
	osi_vnhold(tvc, retry);	/* already held, above */
	if (retry && *retry)
	    return 0;
#endif
	/*
	 * only move to front of vlru if we have proper vcache locking)
	 */
	if (flag & DO_VLRU) {
	    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
		refpanic("FindVC VLRU inconsistent1");
	    }
	    if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
		refpanic("FindVC VLRU inconsistent1");
	    }
	    if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
		refpanic("FindVC VLRU inconsistent2");
	    }
	    UpgradeSToWLock(&afs_xvcache, 26);
	    QRemove(&tvc->vlruq);
	    QAdd(&VLRU, &tvc->vlruq);
	    ConvertWToSLock(&afs_xvcache);
	    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
		refpanic("FindVC VLRU inconsistent1");
	    }
	    if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
		refpanic("FindVC VLRU inconsistent2");
	    }
	    if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
		refpanic("FindVC VLRU inconsistent3");
	    }
	}
	vcachegen++;
    }

    if (flag & DO_STATS) {
	if (tvc)
	    afs_stats_cmperf.vcacheHits++;
	else
	    afs_stats_cmperf.vcacheMisses++;
	if (afs_IsPrimaryCellNum(afid->Cell))
	    afs_stats_cmperf.vlocalAccesses++;
	else
	    afs_stats_cmperf.vremoteAccesses++;
    }
    return tvc;
}				/*afs_FindVCache */
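
/*
 * Note the locking contract encoded in 'flag': DO_VLRU may only be passed
 * when afs_xvcache is held shared, because the VLRU adjustment upgrades the
 * lock in place, and IS_SLOCK/IS_WLOCK tell findvc_sleep which lock mode to
 * drop and re-acquire.  A minimal lookup sketch, following the pattern used
 * in afs_StuffVcache above; 'fid' and 'tvc' are assumed to be declared by the
 * caller and the lock slot number is illustrative only:
 *
 *	ObtainSharedLock(&afs_xvcache, 999);
 *	tvc = afs_FindVCache(&fid, NULL, DO_VLRU | IS_SLOCK);
 *	ReleaseSharedLock(&afs_xvcache);
 *	if (tvc) {
 *		... use the entry ...
 *		afs_PutVCache(tvc);
 *	}
 */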
/*!
 * Find a vcache entry given a fid. Does a wildcard match on what we
 * have for the fid. If more than one entry, don't return anything.
 *
 * \param avcp Fill in pointer if we found one and only one.
 * \param afid Pointer to the fid whose cache entry we desire.
 * \param retry (SGI-specific) tell the caller to drop the lock on xvcache,
 *  unlock the vnode, and try again.
 * \param flags bit 1 to specify whether to compute hit statistics.  Not
 *  set if FindVCache is called as part of internal bookkeeping.
 *
 * \note Environment: Must be called with the afs_xvcache lock at least held at
 * the read level.  In order to do the VLRU adjustment, the xvcache lock
 * must be shared-- we upgrade it here.
 *
 * \return Number of matches found.
 */
int afs_duplicate_nfs_fids = 0;

afs_int32
afs_NFSFindVCache(struct vcache **avcp, struct VenusFid *afid)
{
    struct vcache *tvc;
    afs_int32 i;
    afs_int32 count = 0;
    struct vcache *found_tvc = NULL;
#ifdef AFS_DARWIN80_ENV
    vnode_t tvp;
#endif

    AFS_STATCNT(afs_FindVCache);

  loop:

    ObtainSharedLock(&afs_xvcache, 331);

    i = VCHash(afid);
    for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
	/* Match only on what we have.... */
	if (((tvc->f.fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode)
	    && (tvc->f.fid.Fid.Volume == afid->Fid.Volume)
	    && ((tvc->f.fid.Fid.Unique & 0xffffff) == afid->Fid.Unique)
	    && (tvc->f.fid.Cell == afid->Cell)) {
	    if (tvc->f.states & CVInit) {
		ReleaseSharedLock(&afs_xvcache);
		afs_osi_Sleep(&tvc->f.states);
		goto loop;
	    }
#ifdef AFS_DARWIN80_ENV
	    if (tvc->f.states & CDeadVnode) {
		ReleaseSharedLock(&afs_xvcache);
		afs_osi_Sleep(&tvc->f.states);
		goto loop;
	    }
	    tvp = AFSTOV(tvc);
	    if (vnode_get(tvp)) {
		/* This vnode no longer exists. */
		continue;
	    }
	    if (vnode_ref(tvp)) {
		/* This vnode no longer exists. */
		AFS_GUNLOCK();
		/* AFSTOV(tvc) may be NULL */
		vnode_put(tvp);
		AFS_GLOCK();
		continue;
	    }
#endif /* AFS_DARWIN80_ENV */
	    count++;
	    if (found_tvc) {
		/* Duplicates */
		afs_duplicate_nfs_fids++;
		ReleaseSharedLock(&afs_xvcache);
#ifdef AFS_DARWIN80_ENV
		/* Drop our reference counts. */
		vnode_put(AFSTOV(tvc));
		vnode_put(AFSTOV(found_tvc));
#endif
		return count;
	    }
	    found_tvc = tvc;
	}
    }

    tvc = found_tvc;
    /* should I have a read lock on the vnode here? */
    if (tvc) {
#ifndef AFS_DARWIN80_ENV
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
	afs_int32 retry = 0;
	osi_vnhold(tvc, &retry);
	if (retry) {
	    count = 0;
	    found_tvc = (struct vcache *)0;
	    ReleaseSharedLock(&afs_xvcache);
	    spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
	    goto loop;
	}
#else
	osi_vnhold(tvc, (int *)0);	/* already held, above */
#endif
#endif
	/*
	 * We obtained the xvcache lock above.
	 */
	if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
	    refpanic("FindVC VLRU inconsistent1");
	}
	if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
	    refpanic("FindVC VLRU inconsistent1");
	}
	if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
	    refpanic("FindVC VLRU inconsistent2");
	}
	UpgradeSToWLock(&afs_xvcache, 568);
	QRemove(&tvc->vlruq);
	QAdd(&VLRU, &tvc->vlruq);
	ConvertWToSLock(&afs_xvcache);
	if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
	    refpanic("FindVC VLRU inconsistent1");
	}
	if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
	    refpanic("FindVC VLRU inconsistent2");
	}
	if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
	    refpanic("FindVC VLRU inconsistent3");
	}
    }

    if (tvc)
	afs_stats_cmperf.vcacheHits++;
    else
	afs_stats_cmperf.vcacheMisses++;
    if (afs_IsPrimaryCellNum(afid->Cell))
	afs_stats_cmperf.vlocalAccesses++;
    else
	afs_stats_cmperf.vremoteAccesses++;

    *avcp = tvc;		/* May be null */

    ReleaseSharedLock(&afs_xvcache);
    return (tvc ? 1 : 0);
}				/*afs_NFSFindVCache */
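
/*
 * The masked comparisons above (Vnode & 0xffff, Unique & 0xffffff) exist
 * because the fid reconstructed from an NFS filehandle only carries a
 * truncated vnode and uniquifier, so the hash chain walk treats any cached
 * fid whose low bits match as a candidate.  This is presumably also why the
 * function refuses to answer when more than one entry matches (see
 * afs_duplicate_nfs_fids above): the truncated fid is genuinely ambiguous.
 */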
/*!
 * Initialize vcache related variables
 */
void
afs_vcacheInit(int astatSize)
{
#if !defined(AFS_LINUX22_ENV)
    struct vcache *tvp;
#endif
    int i;

    if (!afs_maxvcount) {
	afs_maxvcount = astatSize;	/* no particular limit on linux? */
    }
#if !defined(AFS_LINUX22_ENV)
    freeVCList = NULL;
#endif

    AFS_RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
    LOCK_INIT(&afs_xvcb, "afs_xvcb");

#if !defined(AFS_LINUX22_ENV)
    /* Allocate and thread the struct vcache entries */
    tvp = afs_osi_Alloc(astatSize * sizeof(struct vcache));
    osi_Assert(tvp != NULL);
    memset(tvp, 0, sizeof(struct vcache) * astatSize);

    Initial_freeVCList = tvp;
    freeVCList = &(tvp[0]);
    for (i = 0; i < astatSize - 1; i++) {
	tvp[i].nextfree = &(tvp[i + 1]);
    }
    tvp[astatSize - 1].nextfree = NULL;
# ifdef KERNEL_HAVE_PIN
    pin((char *)tvp, astatSize * sizeof(struct vcache));	/* XXX */
# endif
#endif

#if defined(AFS_SGI_ENV)
    for (i = 0; i < astatSize; i++) {
	char name[METER_NAMSZ];
	struct vcache *tvc = &tvp[i];

	tvc->v.v_number = ++afsvnumbers;
	tvc->vc_rwlockid = OSI_NO_LOCKID;
	initnsema(&tvc->vc_rwlock, 1,
		  makesname(name, "vrw", tvc->v.v_number));
#ifndef AFS_SGI53_ENV
	initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
#endif
#ifndef AFS_SGI62_ENV
	initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
#endif /* AFS_SGI62_ENV */
    }
#endif

    QInit(&VLRU);
    for (i = 0; i < VCSIZE; ++i)
	QInit(&afs_vhashTV[i]);
}
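
/*
 * The allocation loop above turns the single afs_osi_Alloc'd array into a
 * singly linked free list: entry i points at entry i + 1 through nextfree,
 * the last entry ends the chain with NULL, and freeVCList starts at element
 * 0.  Stat cache entries can then be handed out by popping the head of
 * freeVCList instead of allocating each one separately.
 */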
void
shutdown_vcache(void)
{
    int i;
    struct afs_cbr *tsp;
    /*
     * XXX We may potentially miss some of the vcaches because when
     * there are no free vcache entries and all the vcache entries are active
     * ones then we allocate an additional one - admittedly we almost never
     * had that occur.
     */

    {
	struct afs_q *tq, *uq = NULL;
	struct vcache *tvc;

	for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
	    tvc = QTOV(tq);
	    uq = QPrev(tq);
	    if (tvc->mvid.target_root) {
		osi_FreeSmallSpace(tvc->mvid.target_root);
		tvc->mvid.target_root = NULL;
	    }
#ifdef AFS_AIX_ENV
	    aix_gnode_rele(AFSTOV(tvc));
#endif
	    if (tvc->linkData) {
		afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
		tvc->linkData = 0;
	    }
	}
	/*
	 * Also free the remaining ones in the Cache
	 */
	for (i = 0; i < VCSIZE; i++) {
	    for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
		if (tvc->mvid.target_root) {
		    osi_FreeSmallSpace(tvc->mvid.target_root);
		    tvc->mvid.target_root = NULL;
		}
#ifdef AFS_AIX_ENV
		if (tvc->v.v_gnode)
		    afs_osi_Free(tvc->v.v_gnode, sizeof(struct gnode));
#ifdef AFS_AIX32_ENV
		if (tvc->segid) {
		    AFS_GUNLOCK();
		    vms_delete(tvc->segid);
		    AFS_GLOCK();
		    tvc->segid = tvc->vmh = NULL;
		    if (VREFCOUNT_GT(tvc, 0))
			osi_Panic("flushVcache: vm race");
		}
		if (tvc->credp) {
		    crfree(tvc->credp);
		    tvc->credp = NULL;
		}
#endif
#endif
#if defined(AFS_SUN5_ENV)
		if (tvc->credp) {
		    crfree(tvc->credp);
		    tvc->credp = NULL;
		}
#endif
		if (tvc->linkData) {
		    afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
		    tvc->linkData = 0;
		}

		afs_FreeAllAxs(&(tvc->Access));
	    }
	    afs_vhashT[i] = 0;
	}
    }
    /*
     * Free any leftover callback queue
     */
    for (i = 0; i < afs_stats_cmperf.CallBackAlloced; i++) {
	tsp = afs_cbrHeads[i];
	afs_cbrHeads[i] = 0;
	afs_osi_Free((char *)tsp, AFS_NCBRS * sizeof(struct afs_cbr));
    }
    afs_cbrSpace = 0;

#if !defined(AFS_LINUX22_ENV)
    afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));

# ifdef KERNEL_HAVE_PIN
    unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
# endif

    freeVCList = Initial_freeVCList = 0;
#endif

    AFS_RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
    LOCK_INIT(&afs_xvcb, "afs_xvcb");

    QInit(&VLRU);
    for (i = 0; i < VCSIZE; ++i)
	QInit(&afs_vhashTV[i]);
}
void
afs_DisconGiveUpCallbacks(void)
{
    int i;
    struct vcache *tvc;
    int nq = 0;

    ObtainWriteLock(&afs_xvcache, 1002);	/* XXX - should be a unique number */

 retry:
    /* Somehow, walk the set of vcaches, with each one coming out as tvc */
    for (i = 0; i < VCSIZE; i++) {
	for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
	    int slept = 0;
	    if (afs_QueueVCB(tvc, &slept)) {
		tvc->callback = NULL;
		nq++;
	    }
	    if (slept) {
		goto retry;
	    }
	}
    }

    ReleaseWriteLock(&afs_xvcache);

    afs_FlushVCBs(2);
}
/*!
 *
 * Clear the Statd flag from all vcaches
 *
 * This function removes the Statd flag from all vcaches. It's used by
 * disconnected mode to tidy up during reconnection
 *
 */
void
afs_ClearAllStatdFlag(void)
{
    int i;
    struct vcache *tvc;

    ObtainWriteLock(&afs_xvcache, 715);

    for (i = 0; i < VCSIZE; i++) {
	for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
	    afs_StaleVCacheFlags(tvc, AFS_STALEVC_NODNLC | AFS_STALEVC_NOCB,
				 CUnique);
	}
    }
    ReleaseWriteLock(&afs_xvcache);
}
/**
 * Mark a vcache as stale; our metadata for the relevant file may be out of
 * date.
 *
 * @post Any subsequent access to this vcache will cause us to fetch the
 *       metadata for this vcache again.
 */
void
afs_StaleVCacheFlags(struct vcache *avc, afs_stalevc_flags_t flags,
		     afs_uint32 cflags)
{
    int do_dnlc = 1;
    int do_filename = 0;
    int do_dequeue = 1;
    int lock_cbhash = 1;

    if ((flags & AFS_STALEVC_NODNLC)) {
	do_dnlc = 0;
    }
    if ((flags & AFS_STALEVC_FILENAME)) {
	do_filename = 1;
    }
    if ((flags & AFS_STALEVC_CBLOCKED)) {
	lock_cbhash = 0;
    }
    if ((flags & AFS_STALEVC_NOCB)) {
	do_dequeue = 0;
	lock_cbhash = 0;
    }

    if (lock_cbhash) {
	ObtainWriteLock(&afs_xcbhash, 486);
    }
    if (do_dequeue) {
	afs_DequeueCallback(avc);
    }

    cflags |= CStatd;
    avc->f.states &= ~cflags;

    if (lock_cbhash) {
	ReleaseWriteLock(&afs_xcbhash);
    }

    if ((flags & AFS_STALEVC_SKIP_DNLC_FOR_INIT_FLUSHED) &&
	(avc->f.states & (CVInit | CVFlushed))) {
	do_dnlc = 0;
    }

    if (flags & AFS_STALEVC_CLEARCB) {
	avc->callback = NULL;
    }

    if (do_dnlc) {
	if ((avc->f.fid.Fid.Vnode & 1) ||
	    AFSTOV(avc) == NULL || vType(avc) == VDIR ||
	    (avc->f.states & CForeign)) {
	    /* This vcache is (or could be) a directory. */
	    osi_dnlc_purgedp(avc);

	} else if (do_filename) {
	    osi_dnlc_purgevp(avc);
	}
    }
}

void
afs_SetDataVersion(struct vcache *avc, afs_hyper_t *avers)
{
    hset(avc->f.m.DataVersion, *avers);
}