/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/*
 * Implements:
 * afs_FlushVCache
 * afs_AllocCBR
 * afs_FreeCBR
 * afs_FlushVCBs
 * afs_QueueVCB
 * afs_RemoveVCB
 * afs_NewVCache
 * afs_FlushActiveVcaches
 * afs_VerifyVCache2
 * afs_WriteVCache
 * afs_WriteVCacheDiscon
 * afs_SimpleVStat
 * afs_ProcessFS
 * TellALittleWhiteLie
 * afs_RemoteLookup
 * afs_GetVCache
 * afs_LookupVCache
 * afs_GetRootVCache
 * afs_UpdateStatus
 * afs_FetchStatus
 * afs_StuffVcache
 * afs_PutVCache
 * afs_FindVCache
 * afs_NFSFindVCache
 * afs_vcacheInit
 * shutdown_vcache
 *
 */
#include <afsconfig.h>
#include "afs/param.h"

#include "afs/sysincludes.h"    /*Standard vendor system headers */
#include "afsincludes.h"        /*AFS-based standard headers */
#include "afs/afs_stats.h"
#include "afs/afs_cbqueue.h"
#include "afs/afs_osidnlc.h"

afs_int32 afs_maxvcount = 0;    /* max number of vcache entries */
afs_int32 afs_vcount = 0;       /* number of vcache in use now */

#ifdef AFS_SGI_ENV
int afsvnumbers = 0;
#endif

#ifdef AFS_SGI64_ENV
char *makesname();
#endif /* AFS_SGI64_ENV */

/* Exported variables */
afs_rwlock_t afs_xvcdirty;      /*Lock: discon vcache dirty list mgmt */
afs_rwlock_t afs_xvcache;       /*Lock: alloc new stat cache entries */
afs_rwlock_t afs_xvreclaim;     /*Lock: entries reclaimed, not on free list */
afs_lock_t afs_xvcb;            /*Lock: fids on which there are callbacks */
#if !defined(AFS_LINUX22_ENV)
static struct vcache *freeVCList;       /*Free list for stat cache entries */
struct vcache *ReclaimedVCList;         /*Reclaimed list for stat entries */
static struct vcache *Initial_freeVCList;       /*Initial list for above */
#endif
struct afs_q VLRU;              /*vcache LRU */
afs_int32 vcachegen = 0;
unsigned int afs_paniconwarn = 0;
struct vcache *afs_vhashT[VCSIZE];
struct afs_q afs_vhashTV[VCSIZE];
static struct afs_cbr *afs_cbrHashT[CBRSIZE];
afs_int32 afs_bulkStatsLost;
int afs_norefpanic = 0;


/* Disk backed vcache definitions
 * Both protected by xvcache */
static int afs_nextVcacheSlot = 0;
static struct afs_slotlist *afs_freeSlotList = NULL;

/* Forward declarations */
static afs_int32 afs_QueueVCB(struct vcache *avc, int *slept);


/*
 * The PFlush algorithm makes use of the fact that Fid.Unique is not used in
 * the hash algorithms below.  Change it if need be so that the flushing
 * algorithm doesn't move things from one hash chain to another.
 */
/* Don't hash on the cell; our callback-breaking code sometimes fails to
 * compute the cell correctly, and only scans one hash bucket. */
int VCHash(struct VenusFid *fid)
{
    return opr_jhash_int2(fid->Fid.Volume, fid->Fid.Vnode, 0) &
        opr_jhash_mask(VCSIZEBITS);
}
/* Hash only on volume to speed up volume callbacks. */
int VCHashV(struct VenusFid *fid)
{
    return opr_jhash_int(fid->Fid.Volume, 0) & opr_jhash_mask(VCSIZEBITS);
}
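
/*
 * Illustrative sketch (not from the original source): because VCHashV
 * ignores the vnode number, every vcache for a given volume lands on a
 * single afs_vhashTV chain, so volume-wide processing can walk one bucket.
 * QTOVH is assumed here to map a vhashq pointer back to its vcache, as is
 * done elsewhere in the cache manager:
 *
 *     struct afs_q *tq, *uq;
 *     int bucket = VCHashV(&afid);
 *     for (tq = afs_vhashTV[bucket].next; tq != &afs_vhashTV[bucket];
 *          tq = uq) {
 *         struct vcache *tvc = QTOVH(tq);
 *         uq = QNext(tq);
 *         if (tvc->f.fid.Fid.Volume == afid.Fid.Volume) {
 *             ... process tvc ...
 *         }
 *     }
 */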

/*!
 * Generate an index into the hash table for a given Fid.
 * \param fid
 * \return The hash value.
 */
static int
afs_HashCBRFid(struct AFSFid *fid)
{
    return (fid->Volume + fid->Vnode + fid->Unique) % CBRSIZE;
}

/*!
 * Insert a CBR entry into the hash table.
 * Must be called with afs_xvcb held.
 * \param cbr
 * \return
 */
static void
afs_InsertHashCBR(struct afs_cbr *cbr)
{
    int slot = afs_HashCBRFid(&cbr->fid);

    cbr->hash_next = afs_cbrHashT[slot];
    if (afs_cbrHashT[slot])
        afs_cbrHashT[slot]->hash_pprev = &cbr->hash_next;

    cbr->hash_pprev = &afs_cbrHashT[slot];
    afs_cbrHashT[slot] = cbr;
}
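
/*
 * Note on the hash_pprev idiom above: hash_pprev always points at the
 * pointer that points at this entry (either the bucket head or the
 * previous entry's hash_next), so an entry can be unlinked in O(1) without
 * rescanning the chain.  A sketch of the unlink, exactly as afs_FreeCBR
 * performs it below:
 *
 *     *(cbr->hash_pprev) = cbr->hash_next;
 *     if (cbr->hash_next)
 *         cbr->hash_next->hash_pprev = cbr->hash_pprev;
 */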

/*!
 *
 * Flush the given vcache entry.
 *
 * Environment:
 *      afs_xvcache lock must be held for writing upon entry to
 *      prevent people from changing the vrefCount field, and to
 *      protect the lruq and hnext fields.
 * LOCK: afs_FlushVCache afs_xvcache W
 * REFCNT: vcache ref count must be zero on entry except for osf1
 * RACE: lock is dropped and reobtained, permitting race in caller
 *
 * \param avc Pointer to vcache entry to flush.
 * \param slept Pointer to int to set 1 if we sleep/drop locks, 0 if we don't.
 *
 */
int
afs_FlushVCache(struct vcache *avc, int *slept)
{                               /*afs_FlushVCache */

    afs_int32 i, code;
    struct vcache **uvc, *wvc;

    /* NOTE: We must have nothing drop afs_xvcache until we have removed all
     * possible references to this vcache. This means all hash tables, queues,
     * DNLC, etc. */

    *slept = 0;
    AFS_STATCNT(afs_FlushVCache);
    afs_Trace2(afs_iclSetp, CM_TRACE_FLUSHV, ICL_TYPE_POINTER, avc,
               ICL_TYPE_INT32, avc->f.states);

    code = osi_VM_FlushVCache(avc);
    if (code)
        goto bad;

    if (avc->f.states & CVFlushed) {
        code = EBUSY;
        goto bad;
    }
#if !defined(AFS_LINUX22_ENV)
    if (avc->nextfree || !avc->vlruq.prev || !avc->vlruq.next) {        /* qv afs.h */
        refpanic("LRU vs. Free inconsistency");
    }
#endif
    avc->f.states |= CVFlushed;
    /* pull the entry out of the lruq and put it on the free list */
    QRemove(&avc->vlruq);

    /* keep track of # of files that we bulk stat'd, but never used
     * before they got recycled.
     */
    if (avc->f.states & CBulkStat)
        afs_bulkStatsLost++;
    vcachegen++;
    /* remove entry from the hash chain */
    i = VCHash(&avc->f.fid);
    uvc = &afs_vhashT[i];
    for (wvc = *uvc; wvc; uvc = &wvc->hnext, wvc = *uvc) {
        if (avc == wvc) {
            *uvc = avc->hnext;
            avc->hnext = NULL;
            break;
        }
    }

    /* remove entry from the volume hash table */
    QRemove(&avc->vhashq);

#if defined(AFS_LINUX26_ENV)
    {
        struct pagewriter *pw, *store;
        struct list_head tofree;

        INIT_LIST_HEAD(&tofree);
        spin_lock(&avc->pagewriter_lock);
        list_for_each_entry_safe(pw, store, &avc->pagewriters, link) {
            list_del(&pw->link);
            /* afs_osi_Free may sleep so we need to defer it */
            list_add_tail(&pw->link, &tofree);
        }
        spin_unlock(&avc->pagewriter_lock);
        list_for_each_entry_safe(pw, store, &tofree, link) {
            list_del(&pw->link);
            afs_osi_Free(pw, sizeof(struct pagewriter));
        }
    }
#endif

    if (avc->mvid.target_root)
        osi_FreeSmallSpace(avc->mvid.target_root);
    avc->mvid.target_root = NULL;
    if (avc->linkData) {
        afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
        avc->linkData = NULL;
    }
#if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
    /* OK, there are no internal vrefCounts, so there shouldn't
     * be any more refs here. */
    if (avc->v) {
#ifdef AFS_DARWIN80_ENV
        vnode_clearfsnode(AFSTOV(avc));
        vnode_removefsref(AFSTOV(avc));
#else
        avc->v->v_data = NULL;  /* remove from vnode */
#endif
        AFSTOV(avc) = NULL;     /* also drop the ptr to vnode */
    }
#endif

#ifdef AFS_SUN511_ENV
    if (avc->v) {
        vn_free(avc->v);
        avc->v = NULL;
    }
#elif defined(AFS_SUN510_ENV)
    /* As we use private vnodes, cleanup is up to us */
    vn_reinit(AFSTOV(avc));
#endif
    afs_FreeAllAxs(&(avc->Access));
    afs_StaleVCacheFlags(avc, AFS_STALEVC_FILENAME, CUnique);

    /* By this point, the vcache has been removed from all global structures
     * via which someone could try to use the vcache. It is okay to drop
     * afs_xvcache at this point (if *slept is set). */

    if (afs_shuttingdown == AFS_RUNNING)
        afs_QueueVCB(avc, slept);

    /*
     * Next, keep track of which vnodes we've deleted for create's
     * optimistic synchronization algorithm
     */
    afs_allZaps++;
    if (avc->f.fid.Fid.Vnode & 1)
        afs_oddZaps++;
    else
        afs_evenZaps++;

    afs_vcount--;
#if !defined(AFS_LINUX22_ENV)
    /* put the entry in the free list */
    avc->nextfree = freeVCList;
    freeVCList = avc;
    if (avc->vlruq.prev || avc->vlruq.next) {
        refpanic("LRU vs. Free inconsistency");
    }
    avc->f.states |= CVFlushed;
#else
    /* This should put it back on the vnode free list since usecount is 1 */
    vSetType(avc, VREG);
    if (VREFCOUNT_GT(avc, 0)) {
        AFS_RELE(AFSTOV(avc));
        afs_stats_cmperf.vcacheXAllocs--;
    } else {
        if (afs_norefpanic) {
            afs_warn("flush vc refcnt < 1");
            afs_norefpanic++;
        } else
            osi_Panic("flush vc refcnt < 1");
    }
#endif /* AFS_LINUX22_ENV */
    return 0;

  bad:
    return code;
}                               /*afs_FlushVCache */
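
/*
 * Caller-side sketch (illustrative): afs_FlushVCache must be entered with
 * afs_xvcache write-locked, and a hash-chain walk must restart whenever
 * *slept comes back nonzero, since the chain may have changed while the
 * lock was dropped.  This mirrors what afs_FlushAllVCaches below does:
 *
 *     int slept;
 *   retry:
 *     for (tvc = afs_vhashT[i]; tvc; tvc = nvc) {
 *         nvc = tvc->hnext;
 *         if (afs_FlushVCache(tvc, &slept))
 *             afs_warn("flush failed\n");
 *         if (slept)
 *             goto retry;
 *     }
 */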

#ifndef AFS_SGI_ENV
/*!
 * The core of the inactive vnode op for all but IRIX.
 *
 * \param avc
 * \param acred
 */
void
afs_InactiveVCache(struct vcache *avc, afs_ucred_t *acred)
{
    AFS_STATCNT(afs_inactive);
    if (avc->f.states & CDirty) {
        /* we can't keep trying to push back dirty data forever.  Give up. */
        afs_InvalidateAllSegments(avc); /* turns off dirty bit */
    }
    avc->f.states &= ~CMAPPED;  /* mainly used by SunOS 4.0.x */
    avc->f.states &= ~CDirty;   /* Turn it off */
    if (avc->f.states & CUnlinked) {
        if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
            avc->f.states |= CUnlinkedDel;
            return;
        }
        afs_remunlink(avc, 1);  /* ignore any return code */
    }

}
#endif

/*!
 * Allocate a callback return structure from the
 * free list and return it.
 *
 * Environment: The alloc and free routines are both called with the afs_xvcb lock
 * held, so we don't have to worry about blocking in osi_Alloc.
 *
 * \return The allocated afs_cbr.
 */
static struct afs_cbr *afs_cbrSpace = 0;
/* if alloc limit below changes, fix me! */
static struct afs_cbr *afs_cbrHeads[16];
struct afs_cbr *
afs_AllocCBR(void)
{
    struct afs_cbr *tsp;
    int i;

    while (!afs_cbrSpace) {
        if (afs_stats_cmperf.CallBackAlloced >= sizeof(afs_cbrHeads)/sizeof(afs_cbrHeads[0])) {
            /* don't allocate more than 16 * AFS_NCBRS for now */
            afs_FlushVCBs(0);
            afs_stats_cmperf.CallBackFlushes++;
        } else {
            /* try allocating */
            tsp = afs_osi_Alloc(AFS_NCBRS * sizeof(struct afs_cbr));
            osi_Assert(tsp != NULL);
            for (i = 0; i < AFS_NCBRS - 1; i++) {
                tsp[i].next = &tsp[i + 1];
            }
            tsp[AFS_NCBRS - 1].next = 0;
            afs_cbrSpace = tsp;
            afs_cbrHeads[afs_stats_cmperf.CallBackAlloced] = tsp;
            afs_stats_cmperf.CallBackAlloced++;
        }
    }
    tsp = afs_cbrSpace;
    afs_cbrSpace = tsp->next;
    return tsp;
}

/*!
 * Free a callback return structure, removing it from all lists.
 *
 * Environment: the xvcb lock is held over these calls.
 *
 * \param asp The address of the structure to free.
 *
 * \return 0
 */
int
afs_FreeCBR(struct afs_cbr *asp)
{
    *(asp->pprev) = asp->next;
    if (asp->next)
        asp->next->pprev = asp->pprev;

    *(asp->hash_pprev) = asp->hash_next;
    if (asp->hash_next)
        asp->hash_next->hash_pprev = asp->hash_pprev;

    asp->next = afs_cbrSpace;
    afs_cbrSpace = asp;
    return 0;
}
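
/*
 * Typical producer pairing (sketch): both routines above assume the caller
 * holds afs_xvcb, so queueing a callback return looks like the body of
 * afs_QueueVCB below:
 *
 *     ObtainWriteLock(&afs_xvcb, 274);
 *     tcbp = afs_AllocCBR();
 *     tcbp->fid = avc->f.fid.Fid;
 *     ... link tcbp onto the server's cbrs list ...
 *     afs_InsertHashCBR(tcbp);
 *     ReleaseWriteLock(&afs_xvcb);
 */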

static void
FlushAllVCBs(int nconns, struct rx_connection **rxconns,
             struct afs_conn **conns)
{
    afs_int32 *results;
    afs_int32 i;

    results = afs_osi_Alloc(nconns * sizeof(afs_int32));
    osi_Assert(results != NULL);

    AFS_GUNLOCK();
    multi_Rx(rxconns, nconns)
    {
        multi_RXAFS_GiveUpAllCallBacks();
        results[multi_i] = multi_error;
    } multi_End;
    AFS_GLOCK();

    /*
     * Freeing the CBR will unlink it from the server's CBR list.
     * Do it here, not in the loop, because a dynamic CBR will call
     * into the memory management routines.
     */
    for (i = 0; i < nconns; i++) {
        if (results[i] == 0) {
            /* Unchain all of them */
            while (conns[i]->parent->srvr->server->cbrs)
                afs_FreeCBR(conns[i]->parent->srvr->server->cbrs);
        }
    }
    afs_osi_Free(results, nconns * sizeof(afs_int32));
}

/*!
 * Flush all queued callbacks to all servers.
 *
 * Environment: holds xvcb lock over RPC to guard against race conditions
 *      when a new callback is granted for the same file later on.
 *
 * \return 0 for success.
 */
afs_int32
afs_FlushVCBs(afs_int32 lockit)
{
    struct AFSFid *tfids;
    struct AFSCallBack callBacks[1];
    struct AFSCBFids fidArray;
    struct AFSCBs cbArray;
    afs_int32 code;
    struct afs_cbr *tcbrp;
    int tcount;
    struct server *tsp;
    int i;
    struct vrequest *treq = NULL;
    struct afs_conn *tc;
    int safety1, safety2, safety3;
    XSTATS_DECLS;

    if (AFS_IS_DISCONNECTED)
        return ENETDOWN;

    if ((code = afs_CreateReq(&treq, afs_osi_credp)))
        return code;
    treq->flags |= O_NONBLOCK;
    tfids = afs_osi_Alloc(sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
    osi_Assert(tfids != NULL);

    if (lockit)
        ObtainWriteLock(&afs_xvcb, 273);
    /*
     * Shutting down.
     * First, attempt a multi across everything, all addresses
     * for all servers we know of.
     */

    if (lockit == 2)
        afs_LoopServers(AFS_LS_ALL, NULL, 0, FlushAllVCBs, NULL);

    ObtainReadLock(&afs_xserver);
    for (i = 0; i < NSERVERS; i++) {
        for (safety1 = 0, tsp = afs_servers[i];
             tsp && safety1 < afs_totalServers + 10;
             tsp = tsp->next, safety1++) {
            /* don't have any */
            if (tsp->cbrs == (struct afs_cbr *)0)
                continue;

            /* otherwise, grab a block of AFS_MAXCBRSCALL from the list
             * and make an RPC, over and over again.
             */
            tcount = 0;         /* number found so far */
            for (safety2 = 0; safety2 < afs_cacheStats; safety2++) {
                if (tcount >= AFS_MAXCBRSCALL || !tsp->cbrs) {
                    struct rx_connection *rxconn;
                    /* if buffer is full, or we've queued all we're going
                     * to from this server, we should flush out the
                     * callbacks.
                     */
                    fidArray.AFSCBFids_len = tcount;
                    fidArray.AFSCBFids_val = (struct AFSFid *)tfids;
                    cbArray.AFSCBs_len = 1;
                    cbArray.AFSCBs_val = callBacks;
                    memset(&callBacks[0], 0, sizeof(callBacks[0]));
                    callBacks[0].CallBackType = CB_EXCLUSIVE;
                    for (safety3 = 0; safety3 < AFS_MAXHOSTS * 2; safety3++) {
                        tc = afs_ConnByHost(tsp, tsp->cell->fsport,
                                            tsp->cell->cellNum, treq, 0,
                                            SHARED_LOCK, 0, &rxconn);
                        if (tc) {
                            XSTATS_START_TIME
                                (AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
                            RX_AFS_GUNLOCK();
                            code =
                                RXAFS_GiveUpCallBacks(rxconn, &fidArray,
                                                      &cbArray);
                            RX_AFS_GLOCK();
                            XSTATS_END_TIME;
                        } else
                            code = -1;
                        if (!afs_Analyze
                            (tc, rxconn, code, 0, treq,
                             AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS, SHARED_LOCK,
                             tsp->cell)) {
                            break;
                        }
                    }
                    /* ignore return code, since callbacks may have
                     * been returned anyway, we shouldn't leave them
                     * around to be returned again.
                     *
                     * Next, see if we are done with this server, and if so,
                     * break to deal with the next one.
                     */
                    if (!tsp->cbrs)
                        break;
                    tcount = 0;
                }               /* end of if (flush full buffer) */
                /* if we make it here, we have an entry at the head of cbrs,
                 * which we should copy to the file ID array and then free.
                 */
                tcbrp = tsp->cbrs;
                tfids[tcount++] = tcbrp->fid;

                /* Freeing the CBR will unlink it from the server's CBR list */
                afs_FreeCBR(tcbrp);
            }                   /* while loop for this one server */
            if (safety2 > afs_cacheStats) {
                afs_warn("possible internal error afs_flushVCBs (%d)\n",
                         safety2);
            }
        }                       /* for loop for this hash chain */
    }                           /* loop through all hash chains */
    if (safety1 > afs_totalServers + 2) {
        afs_warn
            ("AFS internal error (afs_flushVCBs) (%d > %d), continuing...\n",
             safety1, afs_totalServers + 2);
        if (afs_paniconwarn)
            osi_Panic("afs_flushVCBS safety1");
    }

    ReleaseReadLock(&afs_xserver);
    if (lockit)
        ReleaseWriteLock(&afs_xvcb);
    afs_osi_Free(tfids, sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
    afs_DestroyReq(treq);
    return 0;
}

/*!
 * Queue a callback on the given fid.
 *
 * Environment:
 *      Locks the xvcb lock.
 *      Called when the xvcache lock is already held.
 * RACE: afs_xvcache may be dropped and reacquired
 *
 * \param avc vcache entry
 * \param slept Set to 1 if we dropped afs_xvcache
 * \return 1 if queued, 0 otherwise
 */

static afs_int32
afs_QueueVCB(struct vcache *avc, int *slept)
{
    int queued = 0;
    struct server *tsp;
    struct afs_cbr *tcbp;
    int reacquire = 0;

    AFS_STATCNT(afs_QueueVCB);

    ObtainWriteLock(&afs_xvcb, 274);

    /* we can't really give back callbacks on RO files, since the
     * server only tracks them on a per-volume basis, and we don't
     * know whether we still have some other files from the same
     * volume. */
    if (!((avc->f.states & CRO) == 0 && avc->callback)) {
        goto done;
    }

    /* The callback is really just a struct server ptr. */
    tsp = (struct server *)(avc->callback);

    if (!afs_cbrSpace) {
        /* If we don't have CBR space, AllocCBR may block or hit the net for
         * clearing up CBRs. Hitting the net may involve a fileserver
         * needing to contact us, so we must drop xvcache so we don't block
         * those requests from going through. */
        reacquire = *slept = 1;
        ReleaseWriteLock(&afs_xvcache);
    }

    /* we now have a pointer to the server, so we just allocate
     * a queue entry and queue it.
     */
    tcbp = afs_AllocCBR();
    tcbp->fid = avc->f.fid.Fid;

    tcbp->next = tsp->cbrs;
    if (tsp->cbrs)
        tsp->cbrs->pprev = &tcbp->next;

    tsp->cbrs = tcbp;
    tcbp->pprev = &tsp->cbrs;

    afs_InsertHashCBR(tcbp);
    queued = 1;

  done:
    /* now release locks and return */
    ReleaseWriteLock(&afs_xvcb);

    if (reacquire) {
        /* make sure this is after dropping xvcb, for locking order */
        ObtainWriteLock(&afs_xvcache, 279);
    }
    return queued;
}
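
/*
 * Lock-order note (sketch of the invariant afs_QueueVCB maintains):
 * afs_xvcache is acquired before afs_xvcb, never the other way around.
 * That is why the routine drops afs_xvcache *before* afs_AllocCBR can
 * block, and reacquires it only after afs_xvcb has been released:
 *
 *     afs_xvcache (W) -> afs_xvcb (W)     ... allowed
 *     afs_xvcb (W) -> afs_xvcache (W)     ... forbidden; release xvcb first
 */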


/*!
 * Remove a queued callback for a given Fid.
 *
 * Environment:
 *      Locks xvcb and xserver locks.
 *      Typically called with xdcache, xvcache and/or individual vcache
 *      entries locked.
 *
 * \param afid The fid we want cleansed of queued callbacks.
 *
 */

void
afs_RemoveVCB(struct VenusFid *afid)
{
    int slot;
    struct afs_cbr *cbr, *ncbr;

    AFS_STATCNT(afs_RemoveVCB);
    ObtainWriteLock(&afs_xvcb, 275);

    slot = afs_HashCBRFid(&afid->Fid);
    ncbr = afs_cbrHashT[slot];

    while (ncbr) {
        cbr = ncbr;
        ncbr = cbr->hash_next;

        if (afid->Fid.Volume == cbr->fid.Volume &&
            afid->Fid.Vnode == cbr->fid.Vnode &&
            afid->Fid.Unique == cbr->fid.Unique) {
            afs_FreeCBR(cbr);
        }
    }

    ReleaseWriteLock(&afs_xvcb);
}

void
afs_FlushReclaimedVcaches(void)
{
#if !defined(AFS_LINUX22_ENV)
    struct vcache *tvc;
    int code, fv_slept;
    struct vcache *tmpReclaimedVCList = NULL;

    ObtainWriteLock(&afs_xvreclaim, 76);
    while (ReclaimedVCList) {
        tvc = ReclaimedVCList;  /* take from free list */
        ReclaimedVCList = tvc->nextfree;
        tvc->nextfree = NULL;
        code = afs_FlushVCache(tvc, &fv_slept);
        if (code) {
            /* Ok, so, if we got code != 0, uh, wtf do we do? */
            /* Probably, build a temporary list and then put all back when we
               get to the end of the list */
            /* This is actually really crappy, but we need to not leak these.
               We probably need a way to be smarter about this. */
            tvc->nextfree = tmpReclaimedVCList;
            tmpReclaimedVCList = tvc;
            /* printf("Reclaim list flush %lx failed: %d\n", (unsigned long) tvc, code); */
        }
        if (tvc->f.states & (CVInit
#ifdef AFS_DARWIN80_ENV
                             | CDeadVnode
#endif
            )) {
            tvc->f.states &= ~(CVInit
#ifdef AFS_DARWIN80_ENV
                               | CDeadVnode
#endif
                );
            afs_osi_Wakeup(&tvc->f.states);
        }
    }
    if (tmpReclaimedVCList)
        ReclaimedVCList = tmpReclaimedVCList;

    ReleaseWriteLock(&afs_xvreclaim);
#endif
}

void
afs_PostPopulateVCache(struct vcache *avc, struct VenusFid *afid, int seq)
{
    /*
     * The proper value for mvstat (for root fids) is set up by the caller.
     */
    avc->mvstat = AFS_MVSTAT_FILE;
    if (afid->Fid.Vnode == 1 && afid->Fid.Unique == 1)
        avc->mvstat = AFS_MVSTAT_ROOT;

    if (afs_globalVFS == 0)
        osi_Panic("afs globalvfs");

    osi_PostPopulateVCache(avc);

    avc->dchint = NULL;
    osi_dnlc_purgedp(avc);      /* this may be overkill */
    memset(&(avc->callsort), 0, sizeof(struct afs_q));
    avc->slocks = NULL;
    avc->f.states &= ~CVInit;
    if (seq) {
        avc->f.states |= CBulkFetching;
        avc->f.m.Length = seq;
    }
    afs_osi_Wakeup(&avc->f.states);
}
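
/*
 * Example of the fid convention used above (informational): every AFS
 * volume root directory has vnode 1, unique 1, so a fid such as
 * 536870912.1.1 is marked AFS_MVSTAT_ROOT here, while 536870912.42.7
 * starts out as AFS_MVSTAT_FILE until the mount-point logic says
 * otherwise.
 */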

int
afs_ShakeLooseVCaches(afs_int32 anumber)
{
    afs_int32 i, loop;
    int evicted;
    struct vcache *tvc;
    struct afs_q *tq, *uq;
    int fv_slept, defersleep = 0;
    int limit;
    afs_int32 target = anumber;

    loop = 0;

 retry:
    i = 0;
    limit = afs_vcount;
    for (tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
        tvc = QTOV(tq);
        uq = QPrev(tq);
        if (tvc->f.states & CVFlushed) {
            refpanic("CVFlushed on VLRU");
        } else if (i++ > limit) {
            afs_warn("afs_ShakeLooseVCaches: i %d limit %d afs_vcount %d afs_maxvcount %d\n",
                     (int)i, limit, (int)afs_vcount, (int)afs_maxvcount);
            refpanic("Found too many AFS vnodes on VLRU (VLRU cycle?)");
        } else if (QNext(uq) != tq) {
            refpanic("VLRU inconsistent");
        } else if (tvc->f.states & CVInit) {
            continue;
        }

        fv_slept = 0;
        evicted = osi_TryEvictVCache(tvc, &fv_slept, defersleep);
        if (evicted) {
            anumber--;
        }

        if (fv_slept) {
            if (loop++ > 100)
                break;
            if (!evicted) {
                /*
                 * This vcache was busy and we slept while trying to evict it.
                 * Move this busy vcache to the head of the VLRU so vcaches
                 * following this busy vcache can be evicted during the retry.
                 */
                QRemove(&tvc->vlruq);
                QAdd(&VLRU, &tvc->vlruq);
            }
            goto retry;         /* start over - may have raced. */
        }
        if (uq == &VLRU) {
            if (anumber && !defersleep) {
                defersleep = 1;
                goto retry;
            }
            break;
        }
    }
    if (!afsd_dynamic_vcaches && anumber == target) {
        afs_warn("afs_ShakeLooseVCaches: warning none freed, using %d of %d\n",
                 afs_vcount, afs_maxvcount);
    }

    return 0;
}

/* Alloc new vnode. */

static struct vcache *
afs_AllocVCache(void)
{
    struct vcache *tvc;

    tvc = osi_NewVnode();

    afs_vcount++;

    /* track the peak */
    if (afsd_dynamic_vcaches && afs_maxvcount < afs_vcount) {
        afs_maxvcount = afs_vcount;
        /*printf("peak vnodes: %d\n", afs_maxvcount);*/
    }

    afs_stats_cmperf.vcacheXAllocs++;   /* count in case we have a leak */

    /* If we create a new inode, we either give it a new slot number,
     * or if one's available, use a slot number from the slot free list
     */
    if (afs_freeSlotList != NULL) {
        struct afs_slotlist *tmp;

        tvc->diskSlot = afs_freeSlotList->slot;
        tmp = afs_freeSlotList;
        afs_freeSlotList = tmp->next;
        afs_osi_Free(tmp, sizeof(struct afs_slotlist));
    } else {
        tvc->diskSlot = afs_nextVcacheSlot++;
    }

    return tvc;
}
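
/*
 * Sketch of the reverse path (illustrative; the actual free happens in the
 * vcache teardown code, which is not in this section): a disk slot is
 * returned by pushing it back onto afs_freeSlotList, again under
 * afs_xvcache:
 *
 *     struct afs_slotlist *tmp = afs_osi_Alloc(sizeof(struct afs_slotlist));
 *     osi_Assert(tmp != NULL);
 *     tmp->slot = avc->diskSlot;
 *     tmp->next = afs_freeSlotList;
 *     afs_freeSlotList = tmp;
 */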

/* Pre populate a newly allocated vcache. On platforms where the actual
 * vnode is attached to the vcache, this function is called before attachment,
 * therefore it cannot perform any actions on the vnode itself */

static void
afs_PrePopulateVCache(struct vcache *avc, struct VenusFid *afid,
                      struct server *serverp) {

    afs_uint32 slot;
    afs_hyper_t zero;
    slot = avc->diskSlot;

    osi_PrePopulateVCache(avc);

    avc->diskSlot = slot;
    QZero(&avc->metadirty);

    AFS_RWLOCK_INIT(&avc->lock, "vcache lock");

    memset(&avc->mvid, 0, sizeof(avc->mvid));
    avc->linkData = NULL;
    avc->cbExpires = 0;
    avc->opens = 0;
    avc->execsOrWriters = 0;
    avc->flockCount = 0;
    avc->f.states = CVInit;
    avc->last_looker = 0;
    avc->f.fid = *afid;
    avc->asynchrony = -1;
    avc->vc_error = 0;

    hzero(avc->mapDV);
    avc->f.truncPos = AFS_NOTRUNC;      /* don't truncate until we need to */
    hzero(zero);
    afs_SetDataVersion(avc, &zero);     /* in case we copy it into flushDV */
    avc->Access = NULL;
    avc->callback = serverp;    /* to minimize chance that clear
                                 * request is lost */

#if defined(AFS_CACHE_BYPASS)
    avc->cachingStates = 0;
    avc->cachingTransitions = 0;
#endif
}

void
afs_FlushAllVCaches(void)
{
    int i;
    struct vcache *tvc, *nvc;

    ObtainWriteLock(&afs_xvcache, 867);

 retry:
    for (i = 0; i < VCSIZE; i++) {
        for (tvc = afs_vhashT[i]; tvc; tvc = nvc) {
            int slept;

            nvc = tvc->hnext;
            if (afs_FlushVCache(tvc, &slept)) {
                afs_warn("Failed to flush vcache 0x%lx\n", (unsigned long)(uintptrsz)tvc);
            }
            if (slept) {
                goto retry;
            }
        }
    }

    ReleaseWriteLock(&afs_xvcache);
}

/*!
 * This routine is responsible for allocating a new cache entry
 * from the free list.  It formats the cache entry and inserts it
 * into the appropriate hash tables.  It must be called with
 * afs_xvcache write-locked so as to prevent several processes from
 * trying to create a new cache entry simultaneously.
 *
 * LOCK: afs_NewVCache  afs_xvcache W
 *
 * \param afid The file id of the file whose cache entry is being created.
 *
 * \return The new vcache struct.
 */

static_inline struct vcache *
afs_NewVCache_int(struct VenusFid *afid, struct server *serverp, int seq)
{
    struct vcache *tvc;
    afs_int32 i, j;
    afs_int32 anumber = VCACHE_FREE;

    AFS_STATCNT(afs_NewVCache);

    afs_FlushReclaimedVcaches();

#if defined(AFS_LINUX22_ENV)
    if (!afsd_dynamic_vcaches && afs_vcount >= afs_maxvcount) {
        afs_ShakeLooseVCaches(anumber);
        if (afs_vcount >= afs_maxvcount) {
            afs_warn("afs_NewVCache - none freed\n");
            return NULL;
        }
    }
    tvc = afs_AllocVCache();
#else /* AFS_LINUX22_ENV */
    /* pull out a free cache entry */
    if (!freeVCList) {
        afs_ShakeLooseVCaches(anumber);
    }

    if (!freeVCList) {
        tvc = afs_AllocVCache();
    } else {
        tvc = freeVCList;       /* take from free list */
        freeVCList = tvc->nextfree;
        tvc->nextfree = NULL;
        afs_vcount++;           /* balanced by FlushVCache */
    }                           /* end of if (!freeVCList) */

#endif /* AFS_LINUX22_ENV */

#if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
    if (tvc->v)
        panic("afs_NewVCache(): free vcache with vnode attached");
#endif

    /* Populate the vcache with as much as we can. */
    afs_PrePopulateVCache(tvc, afid, serverp);

    /* Thread the vcache onto the VLRU */

    i = VCHash(afid);
    j = VCHashV(afid);

    tvc->hnext = afs_vhashT[i];
    afs_vhashT[i] = tvc;
    QAdd(&afs_vhashTV[j], &tvc->vhashq);

    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
        refpanic("NewVCache VLRU inconsistent");
    }
    QAdd(&VLRU, &tvc->vlruq);   /* put in lruq */
    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
        refpanic("NewVCache VLRU inconsistent2");
    }
    if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
        refpanic("NewVCache VLRU inconsistent3");
    }
    if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
        refpanic("NewVCache VLRU inconsistent4");
    }
    vcachegen++;

    /* it should now be safe to drop the xvcache lock - so attach an inode
     * to this vcache, where necessary */
    osi_AttachVnode(tvc, seq);

    /* Get a reference count to hold this vcache for the VLRUQ. Note that
     * we have to do this after attaching the vnode, because the reference
     * count may be held in the vnode itself */

#if defined(AFS_LINUX22_ENV)
    /* Hold it for the LRU (should make count 2) */
    AFS_FAST_HOLD(tvc);
#elif !(defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV))
    VREFCOUNT_SET(tvc, 1);      /* us */
#endif

#if defined (AFS_FBSD_ENV)
    if (tvc->f.states & CVInit)
#endif
        afs_PostPopulateVCache(tvc, afid, seq);

    return tvc;
}                               /*afs_NewVCache */


struct vcache *
afs_NewVCache(struct VenusFid *afid, struct server *serverp)
{
    return afs_NewVCache_int(afid, serverp, 0);
}

struct vcache *
afs_NewBulkVCache(struct VenusFid *afid, struct server *serverp, int seq)
{
    return afs_NewVCache_int(afid, serverp, seq);
}

/*!
 * ???
 *
 * LOCK: afs_FlushActiveVcaches afs_xvcache N
 *
 * \param doflocks : Do we handle flocks?
 */
void
afs_FlushActiveVcaches(afs_int32 doflocks)
{
    struct vcache *tvc;
    int i;
    struct afs_conn *tc;
    afs_int32 code;
    afs_ucred_t *cred = NULL;
    struct vrequest *treq = NULL;
    struct AFSVolSync tsync;
    int didCore;
    XSTATS_DECLS;
    AFS_STATCNT(afs_FlushActiveVcaches);

    code = afs_CreateReq(&treq, afs_osi_credp);
    if (code) {
        afs_warn("unable to alloc treq\n");
        return;
    }

    ObtainReadLock(&afs_xvcache);
    for (i = 0; i < VCSIZE; i++) {
        for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
            if (tvc->f.states & CVInit)
                continue;
#ifdef AFS_DARWIN80_ENV
            if (tvc->f.states & CDeadVnode &&
                (tvc->f.states & (CCore | CUnlinkedDel) ||
                 tvc->flockCount))
                panic("Dead vnode has core/unlinkedel/flock");
#endif
            if (doflocks && tvc->flockCount != 0) {
                struct rx_connection *rxconn;
                /* if this entry has an flock, send a keep-alive call out */
                osi_vnhold(tvc, 0);
                ReleaseReadLock(&afs_xvcache);
                ObtainWriteLock(&tvc->lock, 51);
                do {
                    code = afs_InitReq(treq, afs_osi_credp);
                    if (code) {
                        code = -1;
                        break;  /* shutting down: do not try to extend the lock */
                    }
                    treq->flags |= O_NONBLOCK;

                    tc = afs_Conn(&tvc->f.fid, treq, SHARED_LOCK, &rxconn);
                    if (tc) {
                        XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
                        RX_AFS_GUNLOCK();
                        code =
                            RXAFS_ExtendLock(rxconn,
                                             (struct AFSFid *)&tvc->f.fid.Fid,
                                             &tsync);
                        RX_AFS_GLOCK();
                        XSTATS_END_TIME;
                    } else
                        code = -1;
                } while (afs_Analyze
                         (tc, rxconn, code, &tvc->f.fid, treq,
                          AFS_STATS_FS_RPCIDX_EXTENDLOCK, SHARED_LOCK, NULL));

                ReleaseWriteLock(&tvc->lock);
#ifdef AFS_DARWIN80_ENV
                AFS_FAST_RELE(tvc);
                ObtainReadLock(&afs_xvcache);
#else
                ObtainReadLock(&afs_xvcache);
                AFS_FAST_RELE(tvc);
#endif
            }
            didCore = 0;
            if ((tvc->f.states & CCore) || (tvc->f.states & CUnlinkedDel)) {
                /*
                 * Don't let it evaporate in case someone else is in
                 * this code.  Also, drop the afs_xvcache lock while
                 * getting vcache locks.
                 */
                osi_vnhold(tvc, 0);
                ReleaseReadLock(&afs_xvcache);
#if defined(AFS_SGI_ENV)
                /*
                 * That's because if we come in via the CUnlinkedDel bit
                 * state path we'll have a 0 refcnt.
                 */
                osi_Assert(VREFCOUNT_GT(tvc, 0));
                AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
#endif
                ObtainWriteLock(&tvc->lock, 52);
                if (tvc->f.states & CCore) {
                    tvc->f.states &= ~CCore;
                    /* XXXX Find better place-holder for cred XXXX */
                    cred = (afs_ucred_t *)tvc->linkData;
                    tvc->linkData = NULL;       /* XXX */
                    code = afs_InitReq(treq, cred);
                    afs_Trace2(afs_iclSetp, CM_TRACE_ACTCCORE,
                               ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
                               tvc->execsOrWriters);
                    if (!code) {        /* avoid store when shutting down */
                        code = afs_StoreOnLastReference(tvc, treq);
                    }
                    ReleaseWriteLock(&tvc->lock);
                    hzero(tvc->flushDV);
                    osi_FlushText(tvc);
                    didCore = 1;
                    if (code && code != VNOVNODE) {
                        afs_StoreWarn(code, tvc->f.fid.Fid.Volume,
                                      /* /dev/console */ 1);
                    }
                } else if (tvc->f.states & CUnlinkedDel) {
                    /*
                     * Ignore errors
                     */
                    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SGI_ENV)
                    AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
#endif
                    afs_remunlink(tvc, 0);
#if defined(AFS_SGI_ENV)
                    AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
#endif
                } else {
                    /* lost (or won, perhaps) the race condition */
                    ReleaseWriteLock(&tvc->lock);
                }
#if defined(AFS_SGI_ENV)
                AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
#endif
#ifdef AFS_DARWIN80_ENV
                AFS_FAST_RELE(tvc);
                if (didCore) {
                    AFS_RELE(AFSTOV(tvc));
                    /* Matches write code setting CCore flag */
                    crfree(cred);
                }
                ObtainReadLock(&afs_xvcache);
#else
                ObtainReadLock(&afs_xvcache);
                AFS_FAST_RELE(tvc);
                if (didCore) {
                    AFS_RELE(AFSTOV(tvc));
                    /* Matches write code setting CCore flag */
                    crfree(cred);
                }
#endif
            }
        }
    }
    ReleaseReadLock(&afs_xvcache);
    afs_DestroyReq(treq);
}


/*!
 *
 * Make sure a cache entry is up-to-date status-wise.
 *
 * NOTE: everywhere that calls this can potentially be sped up
 *       by checking CStatd first, and avoiding doing the InitReq
 *       if this is up-to-date.
 *
 *  Anymore, the only places that call this KNOW already that the
 *  vcache is not up-to-date, so we don't screw around.
 *
 * \param avc Pointer to vcache entry to verify.
 * \param areq
 *
 * \return 0 for success or other error codes.
 */
int
afs_VerifyVCache2(struct vcache *avc, struct vrequest *areq)
{
    struct vcache *tvc;

    AFS_STATCNT(afs_VerifyVCache);

    /* otherwise we must fetch the status info */

    ObtainWriteLock(&avc->lock, 53);
    if (avc->f.states & CStatd) {
        ReleaseWriteLock(&avc->lock);
        return 0;
    }
    afs_StaleVCacheFlags(avc, AFS_STALEVC_FILENAME | AFS_STALEVC_CLEARCB,
                         CUnique);
    ReleaseWriteLock(&avc->lock);

    /* fetch the status info */
    tvc = afs_GetVCache(&avc->f.fid, areq, NULL, avc);
    if (!tvc)
        return EIO;
    /* Put it back; caller has already incremented vrefCount */
    afs_PutVCache(tvc);
    return 0;

}                               /*afs_VerifyVCache */
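
/*
 * Fast-path sketch suggested by the NOTE above (illustrative): a caller
 * can skip both the InitReq and the call entirely when the entry is
 * already valid, roughly what the afs_VerifyVCache wrapper does:
 *
 *     if (avc->f.states & CStatd)
 *         code = 0;
 *     else
 *         code = afs_VerifyVCache2(avc, areq);
 */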


/*!
 * Simple copy of stat info into cache.
 *
 * Callers: as of 1992-04-29, only called by WriteVCache
 *
 * \param avc   Ptr to vcache entry involved.
 * \param astat Ptr to stat info to copy.
 *
 */
static void
afs_SimpleVStat(struct vcache *avc,
                struct AFSFetchStatus *astat, struct vrequest *areq)
{
    afs_size_t length;
    AFS_STATCNT(afs_SimpleVStat);

#ifdef AFS_64BIT_CLIENT
    FillInt64(length, astat->Length_hi, astat->Length);
#else /* AFS_64BIT_CLIENT */
    length = astat->Length;
#endif /* AFS_64BIT_CLIENT */

#if defined(AFS_SGI_ENV)
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
        && !AFS_VN_MAPPED((vnode_t *) avc)) {
        osi_Assert((valusema(&avc->vc_rwlock) <= 0)
                   && (OSI_GET_LOCKID() == avc->vc_rwlockid));
        if (length < avc->f.m.Length) {
            vnode_t *vp = (vnode_t *) avc;

            osi_Assert(WriteLocked(&avc->lock));
            ReleaseWriteLock(&avc->lock);
            AFS_GUNLOCK();
            PTOSSVP(vp, (off_t) length, (off_t) MAXLONG);
            AFS_GLOCK();
            ObtainWriteLock(&avc->lock, 67);
        }
    }
#endif

    if (!afs_DirtyPages(avc)) {
        /* if actively writing the file, don't fetch over this value */
        afs_Trace3(afs_iclSetp, CM_TRACE_SIMPLEVSTAT, ICL_TYPE_POINTER, avc,
                   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
                   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
        avc->f.m.Length = length;
        avc->f.m.Date = astat->ClientModTime;
    }
    avc->f.m.Owner = astat->Owner;
    avc->f.m.Group = astat->Group;
    avc->f.m.Mode = astat->UnixModeBits;
    if (vType(avc) == VREG) {
        avc->f.m.Mode |= S_IFREG;
    } else if (vType(avc) == VDIR) {
        avc->f.m.Mode |= S_IFDIR;
    } else if (vType(avc) == VLNK) {
        avc->f.m.Mode |= S_IFLNK;
        if ((avc->f.m.Mode & 0111) == 0)
            avc->mvstat = AFS_MVSTAT_MTPT;
    }
    if (avc->f.states & CForeign) {
        struct axscache *ac;
        avc->f.anyAccess = astat->AnonymousAccess;
#ifdef badidea
        if ((astat->CallerAccess & ~astat->AnonymousAccess))
            /* USED TO SAY:
             * Caller has at least one bit not covered by anonymous, and
             * thus may have interesting rights.
             *
             * HOWEVER, this is a really bad idea, because any access query
             * for bits which aren't covered by anonymous, on behalf of a user
             * who doesn't have any special rights, will result in an answer of
             * the form "I don't know, let's make a FetchStatus RPC and find out!"
             * It's an especially bad idea under Ultrix, since (due to the lack of
             * a proper access() call) it must perform several afs_access() calls
             * in order to create magic mode bits that vary according to who makes
             * the call.  In other words, _every_ stat() generates a test for
             * writeability...
             */
#endif /* badidea */
        if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
            ac->axess = astat->CallerAccess;
        else                    /* not found, add a new one if possible */
            afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
    }

}                               /*afs_SimpleVStat */


/*!
 * Store the status info *only* back to the server for a
 * fid/vrequest.
 *
 * Environment: Must be called with a shared lock held on the vnode.
 *
 * \param avc Ptr to the vcache entry.
 * \param astatus Ptr to the status info to store.
 * \param areq Ptr to the associated vrequest.
 *
 * \return Operation status.
 */

int
afs_WriteVCache(struct vcache *avc,
                struct AFSStoreStatus *astatus,
                struct vrequest *areq)
{
    afs_int32 code;
    struct afs_conn *tc;
    struct AFSFetchStatus OutStatus;
    struct AFSVolSync tsync;
    struct rx_connection *rxconn;
    XSTATS_DECLS;
    AFS_STATCNT(afs_WriteVCache);
    afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
    do {
        tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
        if (tc) {
            XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
            RX_AFS_GUNLOCK();
            code =
                RXAFS_StoreStatus(rxconn, (struct AFSFid *)&avc->f.fid.Fid,
                                  astatus, &OutStatus, &tsync);
            RX_AFS_GLOCK();
            XSTATS_END_TIME;
        } else
            code = -1;
    } while (afs_Analyze
             (tc, rxconn, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_STORESTATUS,
              SHARED_LOCK, NULL));

    UpgradeSToWLock(&avc->lock, 20);
    if (code == 0) {
        /* success, do the changes locally */
        afs_SimpleVStat(avc, &OutStatus, areq);
        /*
         * Update the date, too.  SimpleVStat didn't do this, since
         * it thought we were doing this after fetching new status
         * over a file being written.
         */
        avc->f.m.Date = OutStatus.ClientModTime;
    } else {
        /* failure, set up to check with server next time */
        afs_StaleVCacheFlags(avc, 0, CUnique);
    }
    ConvertWToSLock(&avc->lock);
    return code;

}                               /*afs_WriteVCache */

/*!
 * Store status info only locally, set the proper disconnection flags
 * and add to dirty list.
 *
 * \param avc The vcache to be written locally.
 * \param astatus Get attr fields from local store.
 * \param attrs Only the va_size field is used.
 *
 * \note Must be called with a shared lock on the vnode
 */
int
afs_WriteVCacheDiscon(struct vcache *avc,
                      struct AFSStoreStatus *astatus,
                      struct vattr *attrs)
{
    afs_int32 code = 0;
    afs_int32 flags = 0;

    UpgradeSToWLock(&avc->lock, 700);

    if (!astatus->Mask) {

        return code;

    } else {

        /* Set attributes. */
        if (astatus->Mask & AFS_SETMODTIME) {
            avc->f.m.Date = astatus->ClientModTime;
            flags |= VDisconSetTime;
        }

        if (astatus->Mask & AFS_SETOWNER) {
            /* printf("Not allowed yet. \n"); */
            /*avc->f.m.Owner = astatus->Owner;*/
        }

        if (astatus->Mask & AFS_SETGROUP) {
            /* printf("Not allowed yet. \n"); */
            /*avc->f.m.Group = astatus->Group;*/
        }

        if (astatus->Mask & AFS_SETMODE) {
            avc->f.m.Mode = astatus->UnixModeBits;

#if 0   /* XXX: Leaving this out, so it doesn't mess up the file type flag.*/

            if (vType(avc) == VREG) {
                avc->f.m.Mode |= S_IFREG;
            } else if (vType(avc) == VDIR) {
                avc->f.m.Mode |= S_IFDIR;
            } else if (vType(avc) == VLNK) {
                avc->f.m.Mode |= S_IFLNK;
                if ((avc->f.m.Mode & 0111) == 0)
                    avc->mvstat = AFS_MVSTAT_MTPT;
            }
#endif
            flags |= VDisconSetMode;
        }                       /* if (astatus->Mask & AFS_SETMODE) */

    }                           /* if (!astatus->Mask) */

    if (attrs->va_size > 0) {
        /* XXX: Do I need more checks? */
        /* Truncation operation. */
        flags |= VDisconTrunc;
    }

    if (flags)
        afs_DisconAddDirty(avc, flags, 1);

    /* XXX: How about the rest of the fields? */

    ConvertWToSLock(&avc->lock);

    return code;
}

/*!
 * Copy astat block into vcache info
 *
 * \note This code may get dataversion and length out of sync if the file has
 * been modified.  This is less than ideal.  I haven't thought about it
 * sufficiently to be certain that it is adequate.
 *
 * \note Environment: Must be called under a write lock
 *
 * \param avc  Ptr to vcache entry.
 * \param astat Ptr to stat block to copy in.
 * \param areq Ptr to associated request.
 */
void
afs_ProcessFS(struct vcache *avc,
              struct AFSFetchStatus *astat, struct vrequest *areq)
{
    afs_size_t length;
    afs_hyper_t newDV;
    AFS_STATCNT(afs_ProcessFS);

#ifdef AFS_64BIT_CLIENT
    FillInt64(length, astat->Length_hi, astat->Length);
#else /* AFS_64BIT_CLIENT */
    length = astat->Length;
#endif /* AFS_64BIT_CLIENT */
    /* WARNING: afs_DoBulkStat uses the Length field to store a sequence
     * number for each bulk status request. Under no circumstances
     * should afs_DoBulkStat store a sequence number if the new
     * length will be ignored when afs_ProcessFS is called with
     * new stats. If you change the following conditional then you
     * also need to change the conditional in afs_DoBulkStat.  */
#ifdef AFS_SGI_ENV
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
        && !AFS_VN_MAPPED((vnode_t *) avc)) {
#else
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
#endif
        /* if we're writing or mapping this file, don't fetch over these
         * values.
         */
        afs_Trace3(afs_iclSetp, CM_TRACE_PROCESSFS, ICL_TYPE_POINTER, avc,
                   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
                   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
        avc->f.m.Length = length;
        avc->f.m.Date = astat->ClientModTime;
    }
    hset64(newDV, astat->dataVersionHigh, astat->DataVersion);
    afs_SetDataVersion(avc, &newDV);
    avc->f.m.Owner = astat->Owner;
    avc->f.m.Mode = astat->UnixModeBits;
    avc->f.m.Group = astat->Group;
    avc->f.m.LinkCount = astat->LinkCount;
    if (astat->FileType == File) {
        vSetType(avc, VREG);
        avc->f.m.Mode |= S_IFREG;
    } else if (astat->FileType == Directory) {
        vSetType(avc, VDIR);
        avc->f.m.Mode |= S_IFDIR;
    } else if (astat->FileType == SymbolicLink) {
        if (afs_fakestat_enable && (avc->f.m.Mode & 0111) == 0) {
            vSetType(avc, VDIR);
            avc->f.m.Mode |= S_IFDIR;
        } else {
            vSetType(avc, VLNK);
            avc->f.m.Mode |= S_IFLNK;
        }
        if ((avc->f.m.Mode & 0111) == 0) {
            avc->mvstat = AFS_MVSTAT_MTPT;
        }
    }
    avc->f.anyAccess = astat->AnonymousAccess;
#ifdef badidea
    if ((astat->CallerAccess & ~astat->AnonymousAccess))
        /* USED TO SAY:
         * Caller has at least one bit not covered by anonymous, and
         * thus may have interesting rights.
         *
         * HOWEVER, this is a really bad idea, because any access query
         * for bits which aren't covered by anonymous, on behalf of a user
         * who doesn't have any special rights, will result in an answer of
         * the form "I don't know, let's make a FetchStatus RPC and find out!"
         * It's an especially bad idea under Ultrix, since (due to the lack of
         * a proper access() call) it must perform several afs_access() calls
         * in order to create magic mode bits that vary according to who makes
         * the call.  In other words, _every_ stat() generates a test for
         * writeability...
         */
#endif /* badidea */
    {
        struct axscache *ac;
        if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
            ac->axess = astat->CallerAccess;
        else                    /* not found, add a new one if possible */
            afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
    }
}                               /*afs_ProcessFS */
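
/*
 * Example of the symlink-mode convention used above (informational): AFS
 * mount points are stored as symlinks with no execute bits (e.g. mode
 * 0644), so they are flagged AFS_MVSTAT_MTPT, and with fakestat enabled
 * they are presented as directories without evaluating the target volume.
 * An ordinary symlink is created with execute bits set (e.g. mode 0755)
 * and keeps type VLNK.
 */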


/*!
 * Get fid from server.
 *
 * \param afid
 * \param areq Request to be passed on.
 * \param name Name of the entry to look up.
 * \param OutStatus Fetch status.
 * \param CallBackp
 * \param serverp
 * \param tsyncp
 *
 * \return Success status of operation.
 */
int
afs_RemoteLookup(struct VenusFid *afid, struct vrequest *areq,
                 char *name, struct VenusFid *nfid,
                 struct AFSFetchStatus *OutStatusp,
                 struct AFSCallBack *CallBackp, struct server **serverp,
                 struct AFSVolSync *tsyncp)
{
    afs_int32 code;
    struct afs_conn *tc;
    struct rx_connection *rxconn;
    struct AFSFetchStatus OutDirStatus;
    XSTATS_DECLS;
    if (!name)
        name = "";              /* XXX */
    do {
        tc = afs_Conn(afid, areq, SHARED_LOCK, &rxconn);
        if (tc) {
            if (serverp)
                *serverp = tc->parent->srvr->server;
            XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
            RX_AFS_GUNLOCK();
            code =
                RXAFS_Lookup(rxconn, (struct AFSFid *)&afid->Fid, name,
                             (struct AFSFid *)&nfid->Fid, OutStatusp,
                             &OutDirStatus, CallBackp, tsyncp);
            RX_AFS_GLOCK();
            XSTATS_END_TIME;
        } else
            code = -1;
    } while (afs_Analyze
             (tc, rxconn, code, afid, areq, AFS_STATS_FS_RPCIDX_XLOOKUP, SHARED_LOCK,
              NULL));

    return code;
}


/*!
 * afs_GetVCache
 *
 * Given a file id and a vrequest structure, fetch the status
 * information associated with the file.
 *
 * \param afid File ID.
 * \param areq Ptr to associated vrequest structure, specifying the
 *      user whose authentication tokens will be used.
 * \param avc Caller may already have a vcache for this file, which is
 *      already held.
 *
 * \note Environment:
 *      The cache entry is returned with an increased vrefCount field.
 *      The entry must be discarded by calling afs_PutVCache when you
 *      are through using the pointer to the cache entry.
 *
 *      You should not hold any locks when calling this function, except
 *      locks on other vcache entries.  If you lock more than one vcache
 *      entry simultaneously, you should lock them in this order:
 *
 *          1. Lock all files first, then directories.
 *          2. Within a particular type, lock entries in Fid.Vnode order.
 *
 *      This locking hierarchy is convenient because it allows locking
 *      of a parent dir cache entry, given a file (to check its access
 *      control list).  It also allows renames to be handled easily by
 *      locking directories in a constant order.
 *
 * \note NB.  NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
 *
 * \note Might have a vcache structure already, which must
 *      already be held by the caller
 */
struct vcache *
afs_GetVCache(struct VenusFid *afid, struct vrequest *areq,
              afs_int32 * cached, struct vcache *avc)
{

    afs_int32 code, newvcache = 0;
    struct vcache *tvc;
    struct volume *tvp;
    afs_int32 retry;

    AFS_STATCNT(afs_GetVCache);

    if (cached)
        *cached = 0;            /* Init just in case */

#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
  loop:
#endif

    ObtainSharedLock(&afs_xvcache, 5);

    tvc = afs_FindVCache(afid, &retry, DO_STATS | DO_VLRU | IS_SLOCK);
    if (tvc && retry) {
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
        ReleaseSharedLock(&afs_xvcache);
        spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
        goto loop;
#endif
    }
    if (tvc) {
        if (cached)
            *cached = 1;
        osi_Assert((tvc->f.states & CVInit) == 0);
        /* If we are in readdir, return the vnode even if not statd */
        if ((tvc->f.states & CStatd) || afs_InReadDir(tvc)) {
            ReleaseSharedLock(&afs_xvcache);
            return tvc;
        }
    } else {
        UpgradeSToWLock(&afs_xvcache, 21);

        /* no cache entry, better grab one */
        tvc = afs_NewVCache(afid, NULL);
        newvcache = 1;

        ConvertWToSLock(&afs_xvcache);
        if (tvc == NULL) {
            ReleaseSharedLock(&afs_xvcache);
            return NULL;
        }

        afs_stats_cmperf.vcacheMisses++;
    }

    ReleaseSharedLock(&afs_xvcache);

    ObtainWriteLock(&tvc->lock, 54);

    if (tvc->f.states & CStatd) {
        ReleaseWriteLock(&tvc->lock);
        return tvc;
    }
#ifdef AFS_DARWIN80_ENV
/* Darwin 8.0 only has bufs in nfs, so we shouldn't have to worry about them.
   What about ubc? */
#else
#if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
    /*
     * XXX - I really don't like this.  Should try to understand better.
     * It seems that sometimes, when we get called, we already hold the
     * lock on the vnode (e.g., from afs_getattr via afs_VerifyVCache).
     * We can't drop the vnode lock, because that could result in a race.
     * Sometimes, though, we get here and don't hold the vnode lock.
     * I hate code paths that sometimes hold locks and sometimes don't.
     * In any event, the dodge we use here is to check whether the vnode
     * is locked, and if it isn't, then we gain and drop it around the call
     * to vinvalbuf; otherwise, we leave it alone.
     */
    {
        struct vnode *vp = AFSTOV(tvc);
        int iheldthelock;

#if defined(AFS_DARWIN_ENV)
        iheldthelock = VOP_ISLOCKED(vp);
        if (!iheldthelock)
            vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, current_proc());
        /* this is messy. we can call fsync which will try to reobtain this */
        if (VTOAFS(vp) == tvc)
            ReleaseWriteLock(&tvc->lock);
        if (UBCINFOEXISTS(vp)) {
            vinvalbuf(vp, V_SAVE, &afs_osi_cred, current_proc(), PINOD, 0);
        }
        if (VTOAFS(vp) == tvc)
            ObtainWriteLock(&tvc->lock, 954);
        if (!iheldthelock)
            VOP_UNLOCK(vp, LK_EXCLUSIVE, current_proc());
#elif defined(AFS_FBSD80_ENV)
        iheldthelock = VOP_ISLOCKED(vp);
        if (!iheldthelock) {
            /* nosleep/sleep lock order reversal */
            int glocked = ISAFS_GLOCK();
            if (glocked)
                AFS_GUNLOCK();
            vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
            if (glocked)
                AFS_GLOCK();
        }
        vinvalbuf(vp, V_SAVE, PINOD, 0);        /* changed late in 8.0-CURRENT */
        if (!iheldthelock)
            VOP_UNLOCK(vp, 0);
#elif defined(AFS_FBSD60_ENV)
        iheldthelock = VOP_ISLOCKED(vp, curthread);
        if (!iheldthelock)
            vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
        AFS_GUNLOCK();
        vinvalbuf(vp, V_SAVE, curthread, PINOD, 0);
        AFS_GLOCK();
        if (!iheldthelock)
            VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
#elif defined(AFS_FBSD_ENV)
        iheldthelock = VOP_ISLOCKED(vp, curthread);
        if (!iheldthelock)
            vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
        vinvalbuf(vp, V_SAVE, osi_curcred(), curthread, PINOD, 0);
        if (!iheldthelock)
            VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
#elif defined(AFS_OBSD_ENV)
        iheldthelock = VOP_ISLOCKED(vp, curproc);
        if (!iheldthelock)
            VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
        uvm_vnp_uncache(vp);
        if (!iheldthelock)
            VOP_UNLOCK(vp, 0, curproc);
#elif defined(AFS_NBSD40_ENV)
        iheldthelock = VOP_ISLOCKED(vp);
        if (!iheldthelock) {
            VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
        }
        uvm_vnp_uncache(vp);
        if (!iheldthelock)
            VOP_UNLOCK(vp, 0);
#endif
    }
#endif
#endif

    afs_StaleVCacheFlags(tvc, AFS_STALEVC_NODNLC | AFS_STALEVC_CLEARCB,
                         CUnique);

    /* Is it always appropriate to throw away all the access rights? */
    afs_FreeAllAxs(&(tvc->Access));
    tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-volume info */
    if (tvp) {
        if ((tvp->states & VForeign)) {
            if (newvcache)
                tvc->f.states |= CForeign;
            if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
                && (tvp->rootUnique == afid->Fid.Unique)) {
                tvc->mvstat = AFS_MVSTAT_ROOT;
            }
        }
        if (tvp->states & VRO)
            tvc->f.states |= CRO;
        if (tvp->states & VBackup)
            tvc->f.states |= CBackup;
        /* now copy ".." entry back out of volume structure, if necessary */
        if (tvc->mvstat == AFS_MVSTAT_ROOT && tvp->dotdot.Fid.Volume != 0) {
            if (!tvc->mvid.parent)
                tvc->mvid.parent = (struct VenusFid *)
                    osi_AllocSmallSpace(sizeof(struct VenusFid));
            *tvc->mvid.parent = tvp->dotdot;
        }
        afs_PutVolume(tvp, READ_LOCK);
    }

    /* stat the file */
    afs_RemoveVCB(afid);
    {
        struct AFSFetchStatus OutStatus;

        if (afs_DynrootNewVnode(tvc, &OutStatus)) {
            afs_ProcessFS(tvc, &OutStatus, areq);
            tvc->f.states |= CStatd | CUnique;
            tvc->f.parent.vnode = OutStatus.ParentVnode;
            tvc->f.parent.unique = OutStatus.ParentUnique;
            code = 0;
        } else {

            if (AFS_IS_DISCONNECTED) {
                /* Nothing to do otherwise... */
                code = ENETDOWN;
                /* printf("Network is down in afs_GetCache"); */
            } else
                code = afs_FetchStatus(tvc, afid, areq, &OutStatus);

            /* For the NFS translator's benefit, make sure
             * non-directory vnodes always have their parent FID set
             * correctly, even when created as a result of decoding an
             * NFS filehandle.  It would be nice to also do this for
             * directories, but we can't because the fileserver fills
             * in the FID of the directory itself instead of that of
             * its parent.
             */
            if (!code && OutStatus.FileType != Directory &&
                !tvc->f.parent.vnode) {
                tvc->f.parent.vnode = OutStatus.ParentVnode;
                tvc->f.parent.unique = OutStatus.ParentUnique;
                /* XXX - SXW - It's conceivable we should mark ourselves
                 *             as dirty again here, in case we've been raced
                 *             out of the FetchStatus call.
                 */
            }
        }
    }

    if (code) {
        ReleaseWriteLock(&tvc->lock);

        afs_PutVCache(tvc);
        return NULL;
    }

    ReleaseWriteLock(&tvc->lock);
    return tvc;

}                               /*afs_GetVCache */
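
/*
 * Locking-hierarchy sketch (illustrative) for the rules documented above
 * afs_GetVCache: when two file entries must be locked at once, take them
 * in Fid.Vnode order, and lock directories only after files:
 *
 *     if (avc1->f.fid.Fid.Vnode < avc2->f.fid.Fid.Vnode) {
 *         ObtainWriteLock(&avc1->lock, ...);
 *         ObtainWriteLock(&avc2->lock, ...);
 *     } else {
 *         ObtainWriteLock(&avc2->lock, ...);
 *         ObtainWriteLock(&avc1->lock, ...);
 *     }
 */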
1898
1899
1900
1901/*!
1902 * Lookup a vcache by fid. Look inside the cache first, if not
1903 * there, lookup the file on the server, and then get it's fresh
1904 * cache entry.
1905 *
1906 * \param afid
1907 * \param areq
1908 * \param cached Is element cached? If NULL, don't answer.
1909 * \param adp
1910 * \param aname
1911 *
1912 * \return The found element or NULL.
1913 */
1914struct vcache *
1915afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
1916 afs_int32 * cached, struct vcache *adp, char *aname)
1917{
1918 afs_int32 code, now, newvcache = 0;
1919 struct VenusFid nfid;
1920 struct vcache *tvc;
1921 struct volume *tvp;
1922 struct AFSFetchStatus OutStatus;
1923 struct AFSCallBack CallBack;
1924 struct AFSVolSync tsync;
1925 struct server *serverp = 0;
1926 afs_int32 origCBs;
1927 afs_int32 retry;
1928
1929 AFS_STATCNT(afs_GetVCache);
1930 if (cached)
1931 *cached = 0; /* Init just in case */
1932
1933#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1934 loop1:
1935#endif
1936
1937 ObtainReadLock(&afs_xvcache);
1938 tvc = afs_FindVCache(afid, &retry, DO_STATS /* no vlru */ );
1939
1940 if (tvc) {
1941 ReleaseReadLock(&afs_xvcache);
1942 if (retry) {
1943#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1944 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1945 goto loop1;
1946#endif
1947 }
1948 ObtainReadLock(&tvc->lock);
1949
1950 if (tvc->f.states & CStatd) {
1951 if (cached) {
1952 *cached = 1;
1953 }
1954 ReleaseReadLock(&tvc->lock);
1955 return tvc;
1956 }
1957 tvc->f.states &= ~CUnique;
1958
1959 ReleaseReadLock(&tvc->lock);
1960 afs_PutVCache(tvc);
1961 ObtainReadLock(&afs_xvcache);
1962 }
1963 /* if (tvc) */
1964 ReleaseReadLock(&afs_xvcache);
1965
1966 /* lookup the file */
1967 nfid = *afid;
1968 now = osi_Time();
1969 origCBs = afs_allCBs; /* if anything changes, we don't have a cb */
1970
1971 if (AFS_IS_DISCONNECTED) {
1972 /* printf("Network is down in afs_LookupVcache\n"); */
1973 code = ENETDOWN;
1974 } else
1975 code =
1976 afs_RemoteLookup(&adp->f.fid, areq, aname, &nfid, &OutStatus,
1977 &CallBack, &serverp, &tsync);
1978
1979#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1980 loop2:
1981#endif
1982
1983 ObtainSharedLock(&afs_xvcache, 6);
1984 tvc = afs_FindVCache(&nfid, &retry, DO_VLRU | IS_SLOCK/* no xstats now */ );
1985 if (tvc && retry) {
1986#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1987 ReleaseSharedLock(&afs_xvcache);
1988 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1989 goto loop2;
1990#endif
1991 }
1992
1993 if (!tvc) {
1994 /* no cache entry, better grab one */
1995 UpgradeSToWLock(&afs_xvcache, 22);
1996 tvc = afs_NewVCache(&nfid, serverp);
1997 newvcache = 1;
1998 ConvertWToSLock(&afs_xvcache);
1999 if (!tvc)
2000 {
2001 ReleaseSharedLock(&afs_xvcache);
2002 return NULL;
2003 }
2004 }
2005
2006 ReleaseSharedLock(&afs_xvcache);
2007 ObtainWriteLock(&tvc->lock, 55);
2008
2009     /* Is it always appropriate to throw away all the access rights? */
2010 afs_FreeAllAxs(&(tvc->Access));
2011 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-vol info */
2012 if (tvp) {
2013 if ((tvp->states & VForeign)) {
2014 if (newvcache)
2015 tvc->f.states |= CForeign;
2016 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
2017 && (tvp->rootUnique == afid->Fid.Unique))
2018 tvc->mvstat = AFS_MVSTAT_ROOT;
2019 }
2020 if (tvp->states & VRO)
2021 tvc->f.states |= CRO;
2022 if (tvp->states & VBackup)
2023 tvc->f.states |= CBackup;
2024 /* now copy ".." entry back out of volume structure, if necessary */
2025 if (tvc->mvstat == AFS_MVSTAT_ROOT && tvp->dotdot.Fid.Volume != 0) {
2026 if (!tvc->mvid.parent)
2027 tvc->mvid.parent = (struct VenusFid *)
2028 osi_AllocSmallSpace(sizeof(struct VenusFid));
2029 *tvc->mvid.parent = tvp->dotdot;
2030 }
2031 }
2032
2033 if (code) {
2034 afs_StaleVCacheFlags(tvc, 0, CUnique);
2035 if (tvp)
2036 afs_PutVolume(tvp, READ_LOCK);
2037 ReleaseWriteLock(&tvc->lock);
2038 afs_PutVCache(tvc);
2039 return NULL;
2040 }
2041
2042 ObtainWriteLock(&afs_xcbhash, 466);
2043 if (origCBs == afs_allCBs) {
2044 if (CallBack.ExpirationTime) {
2045 tvc->callback = serverp;
2046 tvc->cbExpires = CallBack.ExpirationTime + now;
2047 tvc->f.states |= CStatd | CUnique;
2048 tvc->f.states &= ~CBulkFetching;
2049 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvp);
2050 } else if (tvc->f.states & CRO) {
2051 /* adapt gives us an hour. */
2052 tvc->cbExpires = 3600 + osi_Time();
2053 /*XXX*/ tvc->f.states |= CStatd | CUnique;
2054 tvc->f.states &= ~CBulkFetching;
2055 afs_QueueCallback(tvc, CBHash(3600), tvp);
2056 } else {
2057 afs_StaleVCacheFlags(tvc,
2058 AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
2059 CUnique);
2060 }
2061 } else {
2062 afs_StaleVCacheFlags(tvc,
2063 AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
2064 CUnique);
2065 }
2066 ReleaseWriteLock(&afs_xcbhash);
2067 if (tvp)
2068 afs_PutVolume(tvp, READ_LOCK);
2069 afs_ProcessFS(tvc, &OutStatus, areq);
2070
2071 ReleaseWriteLock(&tvc->lock);
2072 return tvc;
2073
2074}
2075
2076struct vcache *
2077afs_GetRootVCache(struct VenusFid *afid, struct vrequest *areq,
2078 afs_int32 * cached, struct volume *tvolp)
2079{
2080 afs_int32 code = 0, i, newvcache = 0, haveStatus = 0;
2081 afs_int32 getNewFid = 0;
2082 afs_uint32 start;
2083 struct VenusFid nfid;
2084 struct vcache *tvc;
2085 struct server *serverp = 0;
2086 struct AFSFetchStatus OutStatus;
2087 struct AFSCallBack CallBack;
2088 struct AFSVolSync tsync;
2089 int origCBs = 0;
2090#ifdef AFS_DARWIN80_ENV
2091 vnode_t tvp;
2092#endif
2093
2094 start = osi_Time();
2095
2096 newmtpt:
2097 if (!tvolp->rootVnode || getNewFid) {
2098 struct VenusFid tfid;
2099
2100 tfid = *afid;
2101 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2102 origCBs = afs_allCBs; /* ignore InitCallBackState */
2103 code =
2104 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
2105 &serverp, &tsync);
2106 if (code) {
2107 return NULL;
2108 }
2109/* ReleaseReadLock(&tvolp->lock); */
2110 ObtainWriteLock(&tvolp->lock, 56);
2111 tvolp->rootVnode = afid->Fid.Vnode = nfid.Fid.Vnode;
2112 tvolp->rootUnique = afid->Fid.Unique = nfid.Fid.Unique;
2113 ReleaseWriteLock(&tvolp->lock);
2114/* ObtainReadLock(&tvolp->lock);*/
2115 haveStatus = 1;
2116 } else {
2117 afid->Fid.Vnode = tvolp->rootVnode;
2118 afid->Fid.Unique = tvolp->rootUnique;
2119 }
2120
2121 rootvc_loop:
2122 ObtainSharedLock(&afs_xvcache, 7);
2123 i = VCHash(afid);
2124 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2125 if (!FidCmp(&(tvc->f.fid), afid)) {
2126 if (tvc->f.states & CVInit) {
2127 ReleaseSharedLock(&afs_xvcache);
2128 afs_osi_Sleep(&tvc->f.states);
2129 goto rootvc_loop;
2130 }
2131#ifdef AFS_DARWIN80_ENV
2132 if (tvc->f.states & CDeadVnode) {
2133 ReleaseSharedLock(&afs_xvcache);
2134 afs_osi_Sleep(&tvc->f.states);
2135 goto rootvc_loop;
2136 }
2137 tvp = AFSTOV(tvc);
2138 if (vnode_get(tvp)) /* this bumps ref count */
2139 continue;
2140 if (vnode_ref(tvp)) {
2141 AFS_GUNLOCK();
2142 /* AFSTOV(tvc) may be NULL */
2143 vnode_put(tvp);
2144 AFS_GLOCK();
2145 continue;
2146 }
2147#endif
2148 break;
2149 }
2150 }
2151
2152 if (!haveStatus && (!tvc || !(tvc->f.states & CStatd))) {
2153 /* Mount point no longer stat'd or unknown. FID may have changed. */
2154 getNewFid = 1;
2155 ReleaseSharedLock(&afs_xvcache);
2156#ifdef AFS_DARWIN80_ENV
2157 if (tvc) {
2158 AFS_GUNLOCK();
2159 vnode_put(AFSTOV(tvc));
2160 vnode_rele(AFSTOV(tvc));
2161 AFS_GLOCK();
2162 }
2163#endif
2164 tvc = NULL;
2165 goto newmtpt;
2166 }
2167
2168 if (!tvc) {
2169 UpgradeSToWLock(&afs_xvcache, 23);
2170 /* no cache entry, better grab one */
2171 tvc = afs_NewVCache(afid, NULL);
2172 if (!tvc)
2173 {
2174 ReleaseWriteLock(&afs_xvcache);
2175 return NULL;
2176 }
2177 newvcache = 1;
2178 afs_stats_cmperf.vcacheMisses++;
2179 } else {
2180 if (cached)
2181 *cached = 1;
2182 afs_stats_cmperf.vcacheHits++;
2183#if defined(AFS_DARWIN80_ENV)
2184 /* we already bumped the ref count in the for loop above */
2185#else /* AFS_DARWIN80_ENV */
2186 osi_vnhold(tvc, 0);
2187#endif
2188 UpgradeSToWLock(&afs_xvcache, 24);
2189 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2190 refpanic("GRVC VLRU inconsistent0");
2191 }
2192 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2193 refpanic("GRVC VLRU inconsistent1");
2194 }
2195 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2196 refpanic("GRVC VLRU inconsistent2");
2197 }
2198 QRemove(&tvc->vlruq); /* move to lruq head */
2199 QAdd(&VLRU, &tvc->vlruq);
2200 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2201 refpanic("GRVC VLRU inconsistent3");
2202 }
2203 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2204 refpanic("GRVC VLRU inconsistent4");
2205 }
2206 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2207 refpanic("GRVC VLRU inconsistent5");
2208 }
2209 vcachegen++;
2210 }
2211
2212 ReleaseWriteLock(&afs_xvcache);
2213
2214 if (tvc->f.states & CStatd) {
2215 return tvc;
2216 } else {
2217
2218 ObtainReadLock(&tvc->lock);
2219 tvc->f.states &= ~CUnique;
2220 tvc->callback = NULL; /* redundant, perhaps */
2221 ReleaseReadLock(&tvc->lock);
2222 }
2223
2224 ObtainWriteLock(&tvc->lock, 57);
2225
2226     /* Is it always appropriate to throw away all the access rights? */
2227 afs_FreeAllAxs(&(tvc->Access));
2228
2229 if (newvcache)
2230 tvc->f.states |= CForeign;
2231 if (tvolp->states & VRO)
2232 tvc->f.states |= CRO;
2233 if (tvolp->states & VBackup)
2234 tvc->f.states |= CBackup;
2235 /* now copy ".." entry back out of volume structure, if necessary */
2236 if (newvcache && (tvolp->rootVnode == afid->Fid.Vnode)
2237 && (tvolp->rootUnique == afid->Fid.Unique)) {
2238 tvc->mvstat = AFS_MVSTAT_ROOT;
2239 }
2240 if (tvc->mvstat == AFS_MVSTAT_ROOT && tvolp->dotdot.Fid.Volume != 0) {
2241 if (!tvc->mvid.parent)
2242 tvc->mvid.parent = (struct VenusFid *)
2243 osi_AllocSmallSpace(sizeof(struct VenusFid));
2244 *tvc->mvid.parent = tvolp->dotdot;
2245 }
2246
2247 /* stat the file */
2248 afs_RemoveVCB(afid);
2249
2250 if (!haveStatus) {
2251 struct VenusFid tfid;
2252
2253 tfid = *afid;
2254 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2255 origCBs = afs_allCBs; /* ignore InitCallBackState */
2256 code =
2257 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
2258 &serverp, &tsync);
2259 }
2260
2261 if (code) {
2262 afs_StaleVCacheFlags(tvc, AFS_STALEVC_CLEARCB, CUnique);
2263 ReleaseWriteLock(&tvc->lock);
2264 afs_PutVCache(tvc);
2265 return NULL;
2266 }
2267
2268 ObtainWriteLock(&afs_xcbhash, 468);
2269 if (origCBs == afs_allCBs) {
2270 tvc->f.states |= CTruth;
2271 tvc->callback = serverp;
2272 if (CallBack.ExpirationTime != 0) {
2273 tvc->cbExpires = CallBack.ExpirationTime + start;
2274 tvc->f.states |= CStatd;
2275 tvc->f.states &= ~CBulkFetching;
2276 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvolp);
2277 } else if (tvc->f.states & CRO) {
2278 /* adapt gives us an hour. */
2279 tvc->cbExpires = 3600 + osi_Time();
2280 /*XXX*/ tvc->f.states |= CStatd;
2281 tvc->f.states &= ~CBulkFetching;
2282 afs_QueueCallback(tvc, CBHash(3600), tvolp);
2283 }
2284 } else {
2285 afs_StaleVCacheFlags(tvc, AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
2286 CUnique);
2287 }
2288 ReleaseWriteLock(&afs_xcbhash);
2289 afs_ProcessFS(tvc, &OutStatus, areq);
2290
2291 ReleaseWriteLock(&tvc->lock);
2292 return tvc;
2293}
2294
2295
2296/*!
2297 * Update callback status and (sometimes) attributes of a vnode.
2298 * Called after doing a fetch status RPC. Whilst disconnected, attributes
2299 * shouldn't be written to the vcache here.
2300 *
2301 * \param avc
2302 * \param afid
2303 * \param areq
2304 * \param Outsp Server status after rpc call.
2305  * \param acb Callback for this vnode.
 * \param start Time at which the RPC that filled Outsp was started; used
 * to compute the callback expiry.
2306 *
2307 * \note The vcache must be write locked.
2308 */
2309void
2310afs_UpdateStatus(struct vcache *avc, struct VenusFid *afid,
2311 struct vrequest *areq, struct AFSFetchStatus *Outsp,
2312 struct AFSCallBack *acb, afs_uint32 start)
2313{
2314 struct volume *volp;
2315
2316 if (!AFS_IN_SYNC)
2317 	/* Don't write status to the vcache if resyncing after a disconnection. */
2318 afs_ProcessFS(avc, Outsp, areq);
2319
2320 volp = afs_GetVolume(afid, areq, READ_LOCK);
2321 ObtainWriteLock(&afs_xcbhash, 469);
2322 avc->f.states |= CTruth;
2323 if (avc->callback /* check for race */ ) {
2324 if (acb->ExpirationTime != 0) {
2325 avc->cbExpires = acb->ExpirationTime + start;
2326 avc->f.states |= CStatd;
2327 avc->f.states &= ~CBulkFetching;
2328 afs_QueueCallback(avc, CBHash(acb->ExpirationTime), volp);
2329 } else if (avc->f.states & CRO) {
2330 /* ordinary callback on a read-only volume -- AFS 3.2 style */
2331 avc->cbExpires = 3600 + start;
2332 avc->f.states |= CStatd;
2333 avc->f.states &= ~CBulkFetching;
2334 afs_QueueCallback(avc, CBHash(3600), volp);
2335 } else {
2336 afs_StaleVCacheFlags(avc,
2337 AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
2338 CUnique);
2339 }
2340 } else {
2341 afs_StaleVCacheFlags(avc, AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
2342 CUnique);
2343 }
2344 ReleaseWriteLock(&afs_xcbhash);
2345 if (volp)
2346 afs_PutVolume(volp, READ_LOCK);
2347}
2348
2349void
2350afs_BadFetchStatus(struct afs_conn *tc)
2351{
2352 int addr = ntohl(tc->parent->srvr->sa_ip);
2353 afs_warn("afs: Invalid AFSFetchStatus from server %u.%u.%u.%u\n",
2354 (addr >> 24) & 0xff, (addr >> 16) & 0xff, (addr >> 8) & 0xff,
2355 (addr) & 0xff);
2356 afs_warn("afs: This suggests the server may be sending bad data that "
2357 "can lead to availability issues or data corruption. The "
2358 "issue has been avoided for now, but it may not always be "
2359 "detectable. Please upgrade the server if possible.\n");
2360}
2361
2362/**
2363 * Check if a given AFSFetchStatus structure is sane.
2364 *
2365 * @param[in] tc The server from which we received the status
2366 * @param[in] status The status we received
2367 *
2368 * @return whether the given structure is valid or not
2369 * @retval 0 the structure is fine
2370 * @retval nonzero the structure looks like garbage; act as if we received
2371 * the returned error code from the server
2372 */
2373int
2374afs_CheckFetchStatus(struct afs_conn *tc, struct AFSFetchStatus *status)
2375{
2376 if (status->errorCode ||
2377 status->InterfaceVersion != 1 ||
2378 !(status->FileType > Invalid && status->FileType <= SymbolicLink) ||
2379 status->ParentVnode == 0 || status->ParentUnique == 0) {
2380
2381 afs_warn("afs: FetchStatus ec %u iv %u ft %u pv %u pu %u\n",
2382 (unsigned)status->errorCode, (unsigned)status->InterfaceVersion,
2383 (unsigned)status->FileType, (unsigned)status->ParentVnode,
2384 (unsigned)status->ParentUnique);
2385 afs_BadFetchStatus(tc);
2386
2387 return VBUSY;
2388 }
2389 return 0;
2390}
2391
2392/*!
2393  * Must be called with avc write-locked.
2394  * We don't absolutely have to invalidate the hint unless the dv has
2395  * changed, but be sure to get it right, or there will be consistency bugs.
2396 */
2397afs_int32
2398afs_FetchStatus(struct vcache * avc, struct VenusFid * afid,
2399 struct vrequest * areq, struct AFSFetchStatus * Outsp)
2400{
2401 int code;
2402 afs_uint32 start = 0;
2403 struct afs_conn *tc;
2404 struct AFSCallBack CallBack;
2405 struct AFSVolSync tsync;
2406 struct rx_connection *rxconn;
2407 XSTATS_DECLS;
2408 do {
2409 tc = afs_Conn(afid, areq, SHARED_LOCK, &rxconn);
2410 avc->dchint = NULL; /* invalidate hints */
2411 if (tc) {
2412 avc->callback = tc->parent->srvr->server;
2413 start = osi_Time();
2414 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
2415 RX_AFS_GUNLOCK();
2416 code =
2417 RXAFS_FetchStatus(rxconn, (struct AFSFid *)&afid->Fid, Outsp,
2418 &CallBack, &tsync);
2419 RX_AFS_GLOCK();
2420
2421 XSTATS_END_TIME;
2422
2423 if (code == 0) {
2424 code = afs_CheckFetchStatus(tc, Outsp);
2425 }
2426
2427 } else
2428 code = -1;
2429 } while (afs_Analyze
2430 (tc, rxconn, code, afid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
2431 SHARED_LOCK, NULL));
2432
2433 if (!code) {
2434 afs_UpdateStatus(avc, afid, areq, Outsp, &CallBack, start);
2435 } else {
2436 /* used to undo the local callback, but that's too extreme.
2437 * There are plenty of good reasons that fetchstatus might return
2438 * an error, such as EPERM. If we have the vnode cached, statd,
2439 * with callback, might as well keep track of the fact that we
2440 * don't have access...
2441 */
2442 if (code == EPERM || code == EACCES) {
2443 struct axscache *ac;
2444 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
2445 ac->axess = 0;
2446 else /* not found, add a new one if possible */
2447 afs_AddAxs(avc->Access, areq->uid, 0);
2448 }
2449 }
2450 return code;
2451}
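
/*
 * Illustrative sketch (hypothetical, not original source): the
 * afs_Conn()/afs_Analyze() retry loop that afs_FetchStatus() above is an
 * instance of.  The RPC body is elided, and the stats index is reused
 * from FetchStatus purely as a placeholder.
 */
#if 0
static afs_int32
example_rpc_loop(struct VenusFid *afid, struct vrequest *areq)
{
    struct afs_conn *tc;
    struct rx_connection *rxconn;
    afs_int32 code;

    do {
	tc = afs_Conn(afid, areq, SHARED_LOCK, &rxconn);
	if (tc) {
	    RX_AFS_GUNLOCK();
	    code = 0;		/* ... issue some RXAFS_* call on rxconn ... */
	    RX_AFS_GLOCK();
	} else
	    code = -1;		/* no usable connection right now */
    } while (afs_Analyze(tc, rxconn, code, afid, areq,
			 AFS_STATS_FS_RPCIDX_FETCHSTATUS, SHARED_LOCK, NULL));
    return code;
}
#endif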
2452
2453#if 0
2454/*
2455 * afs_StuffVcache
2456 *
2457 * Description:
2458 * Stuff some information into the vcache for the given file.
2459 *
2460 * Parameters:
2461 * afid : File in question.
2462 * OutStatus : Fetch status on the file.
2463 * CallBack : Callback info.
2464 * tc : RPC connection involved.
2465 * areq : vrequest involved.
2466 *
2467 * Environment:
2468 * Nothing interesting.
2469 */
2470void
2471afs_StuffVcache(struct VenusFid *afid,
2472 struct AFSFetchStatus *OutStatus,
2473 struct AFSCallBack *CallBack, struct afs_conn *tc,
2474 struct vrequest *areq)
2475{
2476 afs_int32 code, i, newvcache = 0;
2477 struct vcache *tvc;
2478 struct AFSVolSync tsync;
2479 struct volume *tvp;
2480 struct axscache *ac;
2481 afs_int32 retry;
2482
2483 AFS_STATCNT(afs_StuffVcache);
2484#ifdef IFS_VCACHECOUNT
2485 ifs_gvcachecall++;
2486#endif
2487
2488 loop:
2489 ObtainSharedLock(&afs_xvcache, 8);
2490
2491 tvc = afs_FindVCache(afid, &retry, DO_VLRU| IS_SLOCK /* no stats */ );
2492 if (tvc && retry) {
2493#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2494 ReleaseSharedLock(&afs_xvcache);
2495 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2496 goto loop;
2497#endif
2498 }
2499
2500 if (!tvc) {
2501 /* no cache entry, better grab one */
2502 UpgradeSToWLock(&afs_xvcache, 25);
2503 tvc = afs_NewVCache(afid, NULL);
2504 newvcache = 1;
2505 ConvertWToSLock(&afs_xvcache);
2506 if (!tvc)
2507 {
2508 ReleaseSharedLock(&afs_xvcache);
2509 	    return;
2510 }
2511 }
2512
2513 ReleaseSharedLock(&afs_xvcache);
2514 ObtainWriteLock(&tvc->lock, 58);
2515
2516 afs_StaleVCacheFlags(tvc, AFS_STALEVC_NOCB, 0);
2517
2518 /* Is it always appropriate to throw away all the access rights? */
2519 afs_FreeAllAxs(&(tvc->Access));
2520
2521 /*Copy useful per-volume info */
2522 tvp = afs_GetVolume(afid, areq, READ_LOCK);
2523 if (tvp) {
2524 if (newvcache && (tvp->states & VForeign))
2525 tvc->f.states |= CForeign;
2526 if (tvp->states & VRO)
2527 tvc->f.states |= CRO;
2528 if (tvp->states & VBackup)
2529 tvc->f.states |= CBackup;
2530 /*
2531 * Now, copy ".." entry back out of volume structure, if
2532 * necessary
2533 */
2534 if (tvc->mvstat == AFS_MVSTAT_ROOT && tvp->dotdot.Fid.Volume != 0) {
2535 if (!tvc->mvid.parent)
2536 tvc->mvid.parent = (struct VenusFid *)
2537 osi_AllocSmallSpace(sizeof(struct VenusFid));
2538 *tvc->mvid.parent = tvp->dotdot;
2539 }
2540 }
2541 /* store the stat on the file */
2542 afs_RemoveVCB(afid);
2543 afs_ProcessFS(tvc, OutStatus, areq);
2544 tvc->callback = tc->srvr->server;
2545
2546 /* we use osi_Time twice below. Ideally, we would use the time at which
2547      * the FetchStatus call began instead, but we don't have it here. So we
2548 * make do with "now". In the CRO case, it doesn't really matter. In
2549 * the other case, we hope that the difference between "now" and when the
2550 * call actually began execution on the server won't be larger than the
2551 * padding which the server keeps. Subtract 1 second anyway, to be on
2552 * the safe side. Can't subtract more because we don't know how big
2553 * ExpirationTime is. Possible consistency problems may arise if the call
2554 * timeout period becomes longer than the server's expiration padding. */
2555 ObtainWriteLock(&afs_xcbhash, 470);
2556 if (CallBack->ExpirationTime != 0) {
2557 tvc->cbExpires = CallBack->ExpirationTime + osi_Time() - 1;
2558 tvc->f.states |= CStatd;
2559 tvc->f.states &= ~CBulkFetching;
2560 afs_QueueCallback(tvc, CBHash(CallBack->ExpirationTime), tvp);
2561 } else if (tvc->f.states & CRO) {
2562 /* old-fashioned AFS 3.2 style */
2563 tvc->cbExpires = 3600 + osi_Time();
2564 /*XXX*/ tvc->f.states |= CStatd;
2565 tvc->f.states &= ~CBulkFetching;
2566 afs_QueueCallback(tvc, CBHash(3600), tvp);
2567 } else {
2568 afs_StaleVCacheFlags(tvc, AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
2569 CUnique);
2570 }
2571 ReleaseWriteLock(&afs_xcbhash);
2572 if (tvp)
2573 afs_PutVolume(tvp, READ_LOCK);
2574
2575 /* look in per-pag cache */
2576 if (tvc->Access && (ac = afs_FindAxs(tvc->Access, areq->uid)))
2577 ac->axess = OutStatus->CallerAccess; /* substitute pags */
2578 else /* not found, add a new one if possible */
2579 afs_AddAxs(tvc->Access, areq->uid, OutStatus->CallerAccess);
2580
2581 ReleaseWriteLock(&tvc->lock);
2582 afs_Trace4(afs_iclSetp, CM_TRACE_STUFFVCACHE, ICL_TYPE_POINTER, tvc,
2583 ICL_TYPE_POINTER, tvc->callback, ICL_TYPE_INT32,
2584 tvc->cbExpires, ICL_TYPE_INT32, tvc->cbExpires - osi_Time());
2585 /*
2586 * Release ref count... hope this guy stays around...
2587 */
2588 afs_PutVCache(tvc);
2589} /*afs_StuffVcache */
2590#endif
2591
2592/*!
2593 * Decrements the reference count on a cache entry.
2594 *
2595 * \param avc Pointer to the cache entry to decrement.
2596 *
2597 * \note Environment: Nothing interesting.
2598 */
2599void
2600afs_PutVCache(struct vcache *avc)
2601{
2602 AFS_STATCNT(afs_PutVCache);
2603#ifdef AFS_DARWIN80_ENV
2604 vnode_put(AFSTOV(avc));
2605 AFS_FAST_RELE(avc);
2606#else
2607 /*
2608 * Can we use a read lock here?
2609 */
2610 ObtainReadLock(&afs_xvcache);
2611 AFS_FAST_RELE(avc);
2612 ReleaseReadLock(&afs_xvcache);
2613#endif
2614} /*afs_PutVCache */
2615
2616
2617/*!
2618 * Reset a vcache entry, so local contents are ignored, and the
2619  * server will be reconsulted the next time the vcache is used.
2620 *
2621 * \param avc Pointer to the cache entry to reset
2622 * \param acred
2623 * \param skipdnlc skip the dnlc purge for this vnode
2624 *
2625 * \note avc must be write locked on entry
2626 *
2627 * \note The caller should purge the dnlc when skipdnlc is set.
2628 */
2629void
2630afs_ResetVCache(struct vcache *avc, afs_ucred_t *acred, afs_int32 skipdnlc)
2631{
2632 afs_stalevc_flags_t flags = 0;
2633 if (skipdnlc) {
2634 flags |= AFS_STALEVC_NODNLC;
2635 }
2636
2637 afs_StaleVCacheFlags(avc, flags, CDirty); /* next reference will re-stat */
2638 /* now find the disk cache entries */
2639 afs_TryToSmush(avc, acred, 1);
2640 if (avc->linkData && !(avc->f.states & CCore)) {
2641 afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
2642 avc->linkData = NULL;
2643 }
2644}
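
/*
 * Illustrative sketch (hypothetical caller, not original source): how
 * afs_ResetVCache() is meant to be invoked.  The lock id 999 is a
 * placeholder; real callers pass their own unique ObtainWriteLock() id.
 */
#if 0
static void
example_reset(struct vcache *avc, afs_ucred_t *acred)
{
    ObtainWriteLock(&avc->lock, 999);	/* avc must be write-locked on entry */
    afs_ResetVCache(avc, acred, 0);	/* 0: do not skip the dnlc purge */
    ReleaseWriteLock(&avc->lock);
}
#endif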
2645
2646/*!
2647  * Sleep while searching for a vcache. Releases the lock held on
2648  * afs_xvcache, sleeps, then reobtains the previously released lock.
2649  *
2650  * \param avc The vcache entry we are waiting on.
2651  * \param flag Determines which afs_xvcache lock mode to release and reobtain.
2654 */
2655static void
2656findvc_sleep(struct vcache *avc, int flag)
2657{
2658 if (flag & IS_SLOCK) {
2659 ReleaseSharedLock(&afs_xvcache);
2660 } else {
2661 if (flag & IS_WLOCK) {
2662 ReleaseWriteLock(&afs_xvcache);
2663 } else {
2664 ReleaseReadLock(&afs_xvcache);
2665 }
2666 }
2667 afs_osi_Sleep(&avc->f.states);
2668 if (flag & IS_SLOCK) {
2669 ObtainSharedLock(&afs_xvcache, 341);
2670 } else {
2671 if (flag & IS_WLOCK) {
2672 ObtainWriteLock(&afs_xvcache, 343);
2673 } else {
2674 ObtainReadLock(&afs_xvcache);
2675 }
2676 }
2677}
2678
2679/*!
2680 * Add a reference on an existing vcache entry.
2681 *
2682 * \param tvc Pointer to the vcache.
2683 *
2684 * \note Environment: Must be called with at least one reference from
2685 * elsewhere on the vcache, even if that reference will be dropped.
2686 * The global lock is required.
2687 *
2688 * \return 0 on success, -1 on failure.
2689 */
2690
2691int
2692afs_RefVCache(struct vcache *tvc)
2693{
2694#ifdef AFS_DARWIN80_ENV
2695 vnode_t tvp;
2696#endif
2697
2698 /* AFS_STATCNT(afs_RefVCache); */
2699
2700#ifdef AFS_DARWIN80_ENV
2701 tvp = AFSTOV(tvc);
2702 if (vnode_get(tvp))
2703 return -1;
2704 if (vnode_ref(tvp)) {
2705 AFS_GUNLOCK();
2706 /* AFSTOV(tvc) may be NULL */
2707 vnode_put(tvp);
2708 AFS_GLOCK();
2709 return -1;
2710 }
2711#else
2712 osi_vnhold(tvc, 0);
2713#endif
2714 return 0;
2715} /*afs_RefVCache */
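
/*
 * Illustrative sketch (hypothetical, not original source): borrowing an
 * extra reference with afs_RefVCache().  Per the note above, the caller
 * must already hold one reference from elsewhere plus the global lock,
 * and every successful RefVCache is balanced by afs_PutVCache().
 */
#if 0
static int
example_borrow_ref(struct vcache *tvc)
{
    if (afs_RefVCache(tvc) != 0)
	return -1;		/* vnode is going away; don't touch it */

    /* ... safe to use tvc while we hold our own reference ... */

    afs_PutVCache(tvc);		/* drop the borrowed reference */
    return 0;
}
#endif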
2716
2717/*!
2718 * Find a vcache entry given a fid.
2719 *
2720 * \param afid Pointer to the fid whose cache entry we desire.
2721 * \param retry (SGI-specific) tell the caller to drop the lock on xvcache,
2722 * unlock the vnode, and try again.
2723 * \param flag Bit 1 to specify whether to compute hit statistics. Not
2724 * set if FindVCache is called as part of internal bookkeeping.
2725 *
2726 * \note Environment: Must be called with the afs_xvcache lock at least held at
2727 * the read level. In order to do the VLRU adjustment, the xvcache lock
2728 * must be shared-- we upgrade it here.
2729 */
2730
2731struct vcache *
2732afs_FindVCache(struct VenusFid *afid, afs_int32 * retry, afs_int32 flag)
2733{
2734
2735 struct vcache *tvc;
2736 afs_int32 i;
2737#ifdef AFS_DARWIN80_ENV
2738 struct vcache *deadvc = NULL, *livevc = NULL;
2739 vnode_t tvp;
2740#endif
2741
2742 AFS_STATCNT(afs_FindVCache);
2743
2744 findloop:
2745 i = VCHash(afid);
2746 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2747 if (FidMatches(afid, tvc)) {
2748 if (tvc->f.states & CVInit) {
2749 findvc_sleep(tvc, flag);
2750 goto findloop;
2751 }
2752#ifdef AFS_DARWIN80_ENV
2753 if (tvc->f.states & CDeadVnode) {
2754 findvc_sleep(tvc, flag);
2755 goto findloop;
2756 }
2757#endif
2758 break;
2759 }
2760 }
2761
2762 /* should I have a read lock on the vnode here? */
2763 if (tvc) {
2764 if (retry)
2765 *retry = 0;
2766#if defined(AFS_DARWIN80_ENV)
2767 tvp = AFSTOV(tvc);
2768 if (vnode_get(tvp))
2769 tvp = NULL;
2770 if (tvp && vnode_ref(tvp)) {
2771 AFS_GUNLOCK();
2772 /* AFSTOV(tvc) may be NULL */
2773 vnode_put(tvp);
2774 AFS_GLOCK();
2775 tvp = NULL;
2776 }
2777 if (!tvp) {
2778 tvc = NULL;
2779 return tvc;
2780 }
2781#elif defined(AFS_DARWIN_ENV)
2782 tvc->f.states |= CUBCinit;
2783 AFS_GUNLOCK();
2784 if (UBCINFOMISSING(AFSTOV(tvc)) ||
2785 UBCINFORECLAIMED(AFSTOV(tvc))) {
2786 ubc_info_init(AFSTOV(tvc));
2787 }
2788 AFS_GLOCK();
2789 tvc->f.states &= ~CUBCinit;
2790#else
2791 osi_vnhold(tvc, retry); /* already held, above */
2792 if (retry && *retry)
2793 return 0;
2794#endif
2795 /*
2796 	 * Only move to the front of the VLRU if we have proper vcache locking.
2797 */
2798 if (flag & DO_VLRU) {
2799 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2800 	        refpanic("FindVC VLRU inconsistent0");
2801 }
2802 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2803 refpanic("FindVC VLRU inconsistent1");
2804 }
2805 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2806 refpanic("FindVC VLRU inconsistent2");
2807 }
2808 UpgradeSToWLock(&afs_xvcache, 26);
2809 QRemove(&tvc->vlruq);
2810 QAdd(&VLRU, &tvc->vlruq);
2811 ConvertWToSLock(&afs_xvcache);
2812 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2813 	        refpanic("FindVC VLRU inconsistent3");
2814 }
2815 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2816 	        refpanic("FindVC VLRU inconsistent4");
2817 }
2818 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2819 	        refpanic("FindVC VLRU inconsistent5");
2820 }
2821 }
2822 vcachegen++;
2823 }
2824
2825 if (flag & DO_STATS) {
2826 if (tvc)
2827 afs_stats_cmperf.vcacheHits++;
2828 else
2829 afs_stats_cmperf.vcacheMisses++;
2830 if (afs_IsPrimaryCellNum(afid->Cell))
2831 afs_stats_cmperf.vlocalAccesses++;
2832 else
2833 afs_stats_cmperf.vremoteAccesses++;
2834 }
2835 return tvc;
2836} /*afs_FindVCache */
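
/*
 * Illustrative sketch (hypothetical, not original source): the locking a
 * caller of afs_FindVCache() must provide.  With flag 0, a read lock on
 * afs_xvcache suffices; DO_VLRU would require at least the shared lock,
 * since the VLRU adjustment upgrades it to a write lock internally.
 */
#if 0
static struct vcache *
example_peek(struct VenusFid *afid)
{
    struct vcache *tvc;
    afs_int32 retry = 0;	/* only meaningful on SGI */

    ObtainReadLock(&afs_xvcache);
    tvc = afs_FindVCache(afid, &retry, 0 /* no stats, no vlru */);
    ReleaseReadLock(&afs_xvcache);
    return tvc;			/* referenced if non-NULL; afs_PutVCache later */
}
#endif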
2837
2838/*!
2839 * Find a vcache entry given a fid. Does a wildcard match on what we
2840  * have for the fid. If more than one entry matches, don't return anything.
2841 *
2842 * \param avcp Fill in pointer if we found one and only one.
2843 * \param afid Pointer to the fid whose cache entry we desire.
2848 *
2849  * \note Environment: Obtains and releases the afs_xvcache lock itself;
2850  * in order to do the VLRU adjustment, the shared xvcache lock is
2851  * upgraded to a write lock here.
2852 *
2853 * \return Number of matches found.
2854 */
2855
2856int afs_duplicate_nfs_fids = 0;
2857
2858afs_int32
2859afs_NFSFindVCache(struct vcache **avcp, struct VenusFid *afid)
2860{
2861 struct vcache *tvc;
2862 afs_int32 i;
2863 afs_int32 count = 0;
2864 struct vcache *found_tvc = NULL;
2865#ifdef AFS_DARWIN80_ENV
2866 vnode_t tvp;
2867#endif
2868
2869 AFS_STATCNT(afs_FindVCache);
2870
2871 loop:
2872
2873 ObtainSharedLock(&afs_xvcache, 331);
2874
2875 i = VCHash(afid);
2876 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2877 /* Match only on what we have.... */
2878 if (((tvc->f.fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode)
2879 && (tvc->f.fid.Fid.Volume == afid->Fid.Volume)
2880 && ((tvc->f.fid.Fid.Unique & 0xffffff) == afid->Fid.Unique)
2881 && (tvc->f.fid.Cell == afid->Cell)) {
2882 if (tvc->f.states & CVInit) {
2883 ReleaseSharedLock(&afs_xvcache);
2884 afs_osi_Sleep(&tvc->f.states);
2885 goto loop;
2886 }
2887#ifdef AFS_DARWIN80_ENV
2888 if (tvc->f.states & CDeadVnode) {
2889 ReleaseSharedLock(&afs_xvcache);
2890 afs_osi_Sleep(&tvc->f.states);
2891 goto loop;
2892 }
2893 tvp = AFSTOV(tvc);
2894 if (vnode_get(tvp)) {
2895 /* This vnode no longer exists. */
2896 continue;
2897 }
2898 if (vnode_ref(tvp)) {
2899 /* This vnode no longer exists. */
2900 AFS_GUNLOCK();
2901 /* AFSTOV(tvc) may be NULL */
2902 vnode_put(tvp);
2903 AFS_GLOCK();
2904 continue;
2905 }
2906#endif /* AFS_DARWIN80_ENV */
2907 count++;
2908 if (found_tvc) {
2909 /* Duplicates */
2910 afs_duplicate_nfs_fids++;
2911 ReleaseSharedLock(&afs_xvcache);
2912#ifdef AFS_DARWIN80_ENV
2913 /* Drop our reference counts. */
2914 vnode_put(AFSTOV(tvc));
2915 vnode_put(AFSTOV(found_tvc));
2916#endif
2917 return count;
2918 }
2919 found_tvc = tvc;
2920 }
2921 }
2922
2923 tvc = found_tvc;
2924 /* should I have a read lock on the vnode here? */
2925 if (tvc) {
2926#ifndef AFS_DARWIN80_ENV
2927#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2928 afs_int32 retry = 0;
2929 osi_vnhold(tvc, &retry);
2930 if (retry) {
2931 count = 0;
2932 found_tvc = (struct vcache *)0;
2933 ReleaseSharedLock(&afs_xvcache);
2934 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2935 goto loop;
2936 }
2937#else
2938 osi_vnhold(tvc, (int *)0); /* already held, above */
2939#endif
2940#endif
2941 /*
2942 * We obtained the xvcache lock above.
2943 */
2944 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2945 	    refpanic("FindVC VLRU inconsistent0");
2946 }
2947 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2948 refpanic("FindVC VLRU inconsistent1");
2949 }
2950 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2951 refpanic("FindVC VLRU inconsistent2");
2952 }
2953 UpgradeSToWLock(&afs_xvcache, 568);
2954 QRemove(&tvc->vlruq);
2955 QAdd(&VLRU, &tvc->vlruq);
2956 ConvertWToSLock(&afs_xvcache);
2957 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2958 	    refpanic("FindVC VLRU inconsistent3");
2959 }
2960 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2961 	    refpanic("FindVC VLRU inconsistent4");
2962 }
2963 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2964 	    refpanic("FindVC VLRU inconsistent5");
2965 }
2966 }
2967 vcachegen++;
2968
2969 if (tvc)
2970 afs_stats_cmperf.vcacheHits++;
2971 else
2972 afs_stats_cmperf.vcacheMisses++;
2973 if (afs_IsPrimaryCellNum(afid->Cell))
2974 afs_stats_cmperf.vlocalAccesses++;
2975 else
2976 afs_stats_cmperf.vremoteAccesses++;
2977
2978 *avcp = tvc; /* May be null */
2979
2980 ReleaseSharedLock(&afs_xvcache);
2981 return (tvc ? 1 : 0);
2982
2983} /*afs_NFSFindVCache */
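
/*
 * Illustrative sketch (hypothetical helper, not original source):
 * building the partial fid that afs_NFSFindVCache() matches against.
 * Per the masks in the loop above, only the low 16 bits of the vnode and
 * the low 24 bits of the uniquifier survive an NFS filehandle, so the
 * lookup is a wildcard match and may legitimately be ambiguous.
 */
#if 0
static struct vcache *
example_from_nfs_handle(afs_int32 cell, afs_uint32 volume,
			afs_uint32 vnode16, afs_uint32 unique24)
{
    struct VenusFid wfid;
    struct vcache *tvc = NULL;

    wfid.Cell = cell;
    wfid.Fid.Volume = volume;
    wfid.Fid.Vnode = vnode16;	/* only the low 16 bits are known */
    wfid.Fid.Unique = unique24;	/* only the low 24 bits are known */

    if (afs_NFSFindVCache(&tvc, &wfid) != 1)
	return NULL;		/* zero matches, or ambiguous duplicates */
    return tvc;			/* referenced; afs_PutVCache when done */
}
#endif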
2984
2985
2986
2987
2988/*!
2989 * Initialize vcache related variables
2990 *
2991 * \param astatSize
2992 */
2993void
2994afs_vcacheInit(int astatSize)
2995{
2996#if !defined(AFS_LINUX22_ENV)
2997 struct vcache *tvp;
2998#endif
2999 int i;
3000 if (!afs_maxvcount) {
3001 afs_maxvcount = astatSize; /* no particular limit on linux? */
3002 }
3003#if !defined(AFS_LINUX22_ENV)
3004 freeVCList = NULL;
3005#endif
3006
3007 AFS_RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
3008 LOCK_INIT(&afs_xvcb, "afs_xvcb");
3009
3010#if !defined(AFS_LINUX22_ENV)
3011 /* Allocate and thread the struct vcache entries */
3012 tvp = afs_osi_Alloc(astatSize * sizeof(struct vcache));
3013 osi_Assert(tvp != NULL);
3014 memset(tvp, 0, sizeof(struct vcache) * astatSize);
3015
3016 Initial_freeVCList = tvp;
3017 freeVCList = &(tvp[0]);
3018 for (i = 0; i < astatSize - 1; i++) {
3019 tvp[i].nextfree = &(tvp[i + 1]);
3020 }
3021 tvp[astatSize - 1].nextfree = NULL;
3022# ifdef KERNEL_HAVE_PIN
3023 pin((char *)tvp, astatSize * sizeof(struct vcache)); /* XXX */
3024# endif
3025#endif
3026
3027#if defined(AFS_SGI_ENV)
3028 for (i = 0; i < astatSize; i++) {
3029 char name[METER_NAMSZ];
3030 struct vcache *tvc = &tvp[i];
3031
3032 tvc->v.v_number = ++afsvnumbers;
3033 tvc->vc_rwlockid = OSI_NO_LOCKID;
3034 initnsema(&tvc->vc_rwlock, 1,
3035 makesname(name, "vrw", tvc->v.v_number));
3036#ifndef AFS_SGI53_ENV
3037 initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
3038#endif
3039#ifndef AFS_SGI62_ENV
3040 initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
3041#endif /* AFS_SGI62_ENV */
3042 }
3043#endif
3044 QInit(&VLRU);
3045 for(i = 0; i < VCSIZE; ++i)
3046 QInit(&afs_vhashTV[i]);
3047}
3048
3049/*!
3050 * Shutdown vcache.
3051 */
3052void
3053shutdown_vcache(void)
3054{
3055 int i;
3056 struct afs_cbr *tsp;
3057 /*
3058  * XXX We may potentially miss some of the vcaches because, when
3059  * there are no free vcache entries and all the vcache entries are active
3060  * ones, we allocate an additional one - admittedly we almost never
3061  * see that occur.
3062 */
3063
3064 {
3065 struct afs_q *tq, *uq = NULL;
3066 struct vcache *tvc;
3067 for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
3068 tvc = QTOV(tq);
3069 uq = QPrev(tq);
3070 if (tvc->mvid.target_root) {
3071 osi_FreeSmallSpace(tvc->mvid.target_root);
3072 tvc->mvid.target_root = NULL;
3073 }
3074#ifdef AFS_AIX_ENV
3075 aix_gnode_rele(AFSTOV(tvc));
3076#endif
3077 if (tvc->linkData) {
3078 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
3079 tvc->linkData = 0;
3080 }
3081 }
3082 /*
3083 * Also free the remaining ones in the Cache
3084 */
3085 for (i = 0; i < VCSIZE; i++) {
3086 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3087 if (tvc->mvid.target_root) {
3088 osi_FreeSmallSpace(tvc->mvid.target_root);
3089 tvc->mvid.target_root = NULL;
3090 }
3091#ifdef AFS_AIX_ENV
3092 if (tvc->v.v_gnode)
3093 afs_osi_Free(tvc->v.v_gnode, sizeof(struct gnode));
3094#ifdef AFS_AIX32_ENV
3095 if (tvc->segid) {
3096 AFS_GUNLOCK();
3097 vms_delete(tvc->segid);
3098 AFS_GLOCK();
3099 tvc->segid = tvc->vmh = NULL;
3100 if (VREFCOUNT_GT(tvc,0))
3101 osi_Panic("flushVcache: vm race");
3102 }
3103 if (tvc->credp) {
3104 crfree(tvc->credp);
3105 tvc->credp = NULL;
3106 }
3107#endif
3108#endif
3109#if defined(AFS_SUN5_ENV)
3110 if (tvc->credp) {
3111 crfree(tvc->credp);
3112 tvc->credp = NULL;
3113 }
3114#endif
3115 if (tvc->linkData) {
3116 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
3117 tvc->linkData = 0;
3118 }
3119
3120 if (tvc->Access)
3121 afs_FreeAllAxs(&(tvc->Access));
3122 }
3123 afs_vhashT[i] = 0;
3124 }
3125 }
3126 /*
3127 * Free any leftover callback queue
3128 */
3129 for (i = 0; i < afs_stats_cmperf.CallBackAlloced; i++) {
3130 tsp = afs_cbrHeads[i];
3131 afs_cbrHeads[i] = 0;
3132 afs_osi_Free((char *)tsp, AFS_NCBRS * sizeof(struct afs_cbr));
3133 }
3134 afs_cbrSpace = 0;
3135
3136#if !defined(AFS_LINUX22_ENV)
3137 afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
3138
3139# ifdef KERNEL_HAVE_PIN
3140 unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
3141# endif
3142
3143 freeVCList = Initial_freeVCList = 0;
3144#endif
3145
3146 AFS_RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
3147 LOCK_INIT(&afs_xvcb, "afs_xvcb");
3148 QInit(&VLRU);
3149 for(i = 0; i < VCSIZE; ++i)
3150 QInit(&afs_vhashTV[i]);
3151}
3152
3153void
3154afs_DisconGiveUpCallbacks(void)
3155{
3156 int i;
3157 struct vcache *tvc;
3158 int nq=0;
3159
3160 ObtainWriteLock(&afs_xvcache, 1002); /* XXX - should be a unique number */
3161
3162 retry:
3163 /* Somehow, walk the set of vcaches, with each one coming out as tvc */
3164 for (i = 0; i < VCSIZE; i++) {
3165 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3166 int slept = 0;
3167 if (afs_QueueVCB(tvc, &slept)) {
3168 tvc->callback = NULL;
3169 nq++;
3170 }
3171 if (slept) {
3172 goto retry;
3173 }
3174 }
3175 }
3176
3177 ReleaseWriteLock(&afs_xvcache);
3178
3179 afs_FlushVCBs(2);
3180}
3181
3182/*!
3183 *
3184 * Clear the Statd flag from all vcaches
3185 *
3186 * This function removes the Statd flag from all vcaches. It's used by
3187  * disconnected mode to tidy up during reconnection.
3188 *
3189 */
3190void
3191afs_ClearAllStatdFlag(void)
3192{
3193 int i;
3194 struct vcache *tvc;
3195
3196 ObtainWriteLock(&afs_xvcache, 715);
3197
3198 for (i = 0; i < VCSIZE; i++) {
3199 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3200 afs_StaleVCacheFlags(tvc, AFS_STALEVC_NODNLC | AFS_STALEVC_NOCB,
3201 CUnique);
3202 }
3203 }
3204 ReleaseWriteLock(&afs_xvcache);
3205}
3206
3207/**
3208 * Mark a vcache as stale; our metadata for the relevant file may be out of
3209 * date.
3210 *
3211 * @post Any subsequent access to this vcache will cause us to fetch the
3212 * metadata for this vcache again.
3213 */
3214void
3215afs_StaleVCacheFlags(struct vcache *avc, afs_stalevc_flags_t flags,
3216 afs_uint32 cflags)
3217{
3218 int do_dnlc = 1;
3219 int do_filename = 0;
3220 int do_dequeue = 1;
3221 int lock_cbhash = 1;
3222
3223 if ((flags & AFS_STALEVC_NODNLC)) {
3224 do_dnlc = 0;
3225 }
3226 if ((flags & AFS_STALEVC_FILENAME)) {
3227 do_filename = 1;
3228 }
3229 if ((flags & AFS_STALEVC_CBLOCKED)) {
3230 lock_cbhash = 0;
3231 }
3232 if ((flags & AFS_STALEVC_NOCB)) {
3233 do_dequeue = 0;
3234 lock_cbhash = 0;
3235 }
3236
3237 if (lock_cbhash) {
3238 ObtainWriteLock(&afs_xcbhash, 486);
3239 }
3240 if (do_dequeue) {
3241 afs_DequeueCallback(avc);
3242 }
3243
3244 cflags |= CStatd;
3245 avc->f.states &= ~cflags;
3246
3247 if (lock_cbhash) {
3248 ReleaseWriteLock(&afs_xcbhash);
3249 }
3250
3251 if ((flags & AFS_STALEVC_SKIP_DNLC_FOR_INIT_FLUSHED) &&
3252 (avc->f.states & (CVInit | CVFlushed))) {
3253 do_dnlc = 0;
3254 }
3255
3256 if (flags & AFS_STALEVC_CLEARCB) {
3257 avc->callback = NULL;
3258 }
3259
3260 if (do_dnlc) {
3261 if ((avc->f.fid.Fid.Vnode & 1) ||
3262 AFSTOV(avc) == NULL || vType(avc) == VDIR ||
3263 (avc->f.states & CForeign)) {
3264 /* This vcache is (or could be) a directory. */
3265 osi_dnlc_purgedp(avc);
3266
3267 } else if (do_filename) {
3268 osi_dnlc_purgevp(avc);
3269 }
3270 }
3271}
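
/*
 * Illustrative sketch (not original source): common call shapes for
 * afs_StaleVCacheFlags(), matching the uses found elsewhere in this
 * file.  The caller is responsible for holding the appropriate vcache
 * and hash locks for each variant.
 */
#if 0
static void
example_stale_calls(struct vcache *avc)
{
    /* Plain invalidation: dequeue the callback (takes afs_xcbhash
     * itself), clear CStatd | CUnique, and purge the dnlc as needed. */
    afs_StaleVCacheFlags(avc, 0, CUnique);

    /* The caller already holds afs_xcbhash write-locked and also wants
     * the callback server pointer cleared. */
    afs_StaleVCacheFlags(avc, AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
			 CUnique);

    /* Invalidate without touching the callback queue or the dnlc, e.g.
     * when sweeping every vcache as afs_ClearAllStatdFlag() does. */
    afs_StaleVCacheFlags(avc, AFS_STALEVC_NODNLC | AFS_STALEVC_NOCB,
			 CUnique);
}
#endif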
3272
3273void
3274afs_SetDataVersion(struct vcache *avc, afs_hyper_t *avers)
3275{
3276 hset(avc->f.m.DataVersion, *avers);
3277}