/* Commit 805e021f */
1 | /* |
2 | * Copyright 2000, International Business Machines Corporation and others. | |
3 | * All Rights Reserved. | |
4 | * | |
5 | * This software has been released under the terms of the IBM Public | |
6 | * License. For details, see the LICENSE file in the top-level source | |
7 | * directory or online at http://www.openafs.org/dl/license10.html | |
8 | */ | |
9 | ||
10 | #include <afsconfig.h> | |
11 | #include "afs/param.h" | |
12 | ||
13 | #include "afs/sysincludes.h" /*Standard vendor system headers */ | |
14 | #include "afsincludes.h" /*AFS-based standard headers */ | |
15 | ||
/*
 * Try to evict a vcache entry so its slot can be reused.
 *
 * Called with afs_xvcache write-locked and the AFS global lock held.
 * Returns 0 if the entry could not be evicted (interlock busy or the
 * vcache is still in use), 1 if it was evicted or already doomed.
 *
 * On the eviction path both afs_xvcache and the global lock are
 * dropped and reacquired, and *slept is set to 1 so the caller knows
 * the locks were released.  The defersleep parameter is unused on
 * this platform.
 */
int
osi_TryEvictVCache(struct vcache *avc, int *slept, int defersleep)
{
    struct vnode *vp;
    int code;

    vp = AFSTOV(avc);

    /* Non-blocking attempt on the vnode interlock; if someone else
     * holds it, just skip this entry rather than sleeping. */
    if (!VI_TRYLOCK(vp))
	return 0;
    /* Refuse to evict entries that are still referenced/active. */
    code = osi_fbsd_checkinuse(avc);
    if (code != 0) {
	VI_UNLOCK(vp);
	return 0;
    }

    /* Already being reclaimed by the VFS layer: nothing for us to do,
     * but report it as evictable. */
    if ((vp->v_iflag & VI_DOOMED) != 0) {
	VI_UNLOCK(vp);
	return 1;
    }

    /* must hold the vnode before calling vgone()
     * This code largely copied from vfs_subr.c:vlrureclaim() */
    vholdl(vp);

    /* vn_lock/vgone may sleep, so drop our locks first.  Lock order:
     * release afs_xvcache, then the global lock. */
    ReleaseWriteLock(&afs_xvcache);
    AFS_GUNLOCK();

    *slept = 1;
    /* use the interlock while locking, so no one else can DOOM this */
    vn_lock(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_RETRY);
    vgone(vp);
    VOP_UNLOCK(vp, 0);
    vdrop(vp);

    /* Reacquire in the reverse order we dropped. */
    AFS_GLOCK();
    ObtainWriteLock(&afs_xvcache, 340);
    return 1;
}
55 | ||
56 | struct vcache * | |
57 | osi_NewVnode(void) { | |
58 | struct vcache *tvc; | |
59 | ||
60 | tvc = afs_osi_Alloc(sizeof(struct vcache)); | |
61 | tvc->v = NULL; /* important to clean this, or use memset 0 */ | |
62 | ||
63 | return tvc; | |
64 | } | |
65 | ||
66 | void | |
67 | osi_PrePopulateVCache(struct vcache *avc) { | |
68 | memset(avc, 0, sizeof(struct vcache)); | |
69 | } | |
70 | ||
/*
 * Allocate a FreeBSD vnode and attach it to the given vcache entry.
 *
 * Called with afs_xvcache write-locked and the AFS global lock held;
 * both are dropped around getnewvnode() (which may sleep) and then
 * reacquired.  Because the locks are released, another thread may
 * attach a vnode to the same vcache first; that race is detected by
 * checking avc->v after the locks are retaken.  The seq parameter is
 * unused on this platform.
 */
void
osi_AttachVnode(struct vcache *avc, int seq) {
    struct vnode *vp;

    /* getnewvnode() can sleep; drop our locks (xvcache first, then
     * the global lock) before calling into the VFS. */
    ReleaseWriteLock(&afs_xvcache);
    AFS_GUNLOCK();
    if (getnewvnode(MOUNT_AFS, afs_globalVFS, &afs_vnodeops, &vp))
	panic("afs getnewvnode");	/* can't happen */
    /* XXX verified on 80--TODO check on 7x */
    /* Some FreeBSD versions require an explicit insmntque() to put
     * the new vnode on the mount's vnode list. */
    if (!vp->v_mount) {
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* !glocked */
	insmntque(vp, afs_globalVFS);
	VOP_UNLOCK(vp, 0);
    }
    AFS_GLOCK();
    ObtainWriteLock(&afs_xvcache,339);
    if (avc->v != NULL) {
	/* I'd like to know if this ever happens...
	 * We don't drop global for the rest of this function,
	 * so if we do lose the race, the other thread should
	 * have found the same vnode and finished initializing
	 * the vcache entry. Is it conceivable that this vcache
	 * entry could be recycled during this interval? If so,
	 * then there probably needs to be some sort of additional
	 * mutual exclusion (an Embryonic flag would suffice).
	 * -GAW */
	/* NOTE(review): on this path the vnode we just obtained from
	 * getnewvnode() appears to be abandoned without a release —
	 * confirm whether a vrele()/vput() is needed here. */
	afs_warn("afs_NewVCache: lost the race\n");
	return;
    }
    /* Link vnode and vcache to each other. */
    avc->v = vp;
    avc->v->v_data = avc;
    /* Per-vcache rwlock used by the vnode lock operations. */
    lockinit(&avc->rwlock, PINOD, "vcache", 0, 0);
}
104 | ||
/*
 * Final platform-specific setup after a vcache has been populated:
 * associate the vnode with the global AFS mount and give it a
 * default type of VREG (regular file).
 */
void
osi_PostPopulateVCache(struct vcache *avc) {
    avc->v->v_mount = afs_globalVFS;
    vSetType(avc, VREG);
}
110 |