/*
 * src/afs/FBSD/osi_vm.c — OpenAFS upstream import, version 1.8.5
 * (hcoop/debian/openafs.git, commit 805e021f)
 */
1/*
2 * Copyright 2000, International Business Machines Corporation and others.
3 * All Rights Reserved.
4 *
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
8 */
9
10
11/* osi_vm.c implements:
12 *
13 * osi_VM_FlushVCache(avc)
14 * osi_ubc_flush_dirty_and_wait(vp, flags)
15 * osi_VM_StoreAllSegments(avc)
16 * osi_VM_TryToSmush(avc, acred, sync)
17 * osi_VM_FlushPages(avc, credp)
18 * osi_VM_Truncate(avc, alen, acred)
19 */
20
21#include <afsconfig.h>
22#include "afs/param.h"
23#include <sys/param.h>
24#include <sys/vnode.h>
25
26
27#include "afs/sysincludes.h" /* Standard vendor system headers */
28#include "afsincludes.h" /* Afs-based standard headers */
29#include "afs/afs_stats.h" /* statistics */
30#include <vm/vm_object.h>
31#include <vm/vm_map.h>
32#include <sys/limits.h>
33#if __FreeBSD_version >= 1000030
34#include <sys/rwlock.h>
35#endif
36
37/*
38 * FreeBSD implementation notes:
39 * Most of these operations require us to frob vm_objects. Most
40 * functions require that the object be locked (with VM_OBJECT_*LOCK)
41 * on entry and leave it locked on exit. The locking protocol
42 * requires that we access vp->v_object with the heavy vnode lock
43 * held and the vnode interlock unlocked.
44 *
45 * The locking protocol for vnodes is defined in
46 * kern/vnode_if.src and sys/vnode.h; unfortunately, it is not *quite*
47 * constant from version to version so to be properly correct we must
48 * check the VCS history of those files.
49 */
50
/*
 * Compatibility shims for the vnode locking API.  FreeBSD 8.0 dropped the
 * explicit curthread argument from vn_lock(), VOP_UNLOCK() and
 * VOP_ISLOCKED(), so older releases need it passed through.
 */
#if defined(AFS_FBSD80_ENV)
#define lock_vnode(v, f)	vn_lock((v), (f))
#define ilock_vnode(v)	vn_lock((v), LK_INTERLOCK|LK_EXCLUSIVE|LK_RETRY)
/* XXX disabled - may or may not cause trouble */
#define unlock_vnode(v)	VOP_UNLOCK((v), 0)
#define islocked_vnode(v)	VOP_ISLOCKED((v))
#else
#define lock_vnode(v, f)	vn_lock((v), (f), curthread)
#define ilock_vnode(v)	vn_lock((v), LK_INTERLOCK|LK_EXCLUSIVE|LK_RETRY, curthread)
#define unlock_vnode(v)	VOP_UNLOCK((v), 0, curthread)
#define islocked_vnode(v)	VOP_ISLOCKED((v), curthread)
#endif

/*
 * FreeBSD 10 (version 1000030) split the vm_object lock into reader/writer
 * flavors (VM_OBJECT_WLOCK et al.); earlier releases have only the single
 * VM_OBJECT_LOCK.  All callers here need the exclusive (write) flavor.
 */
#if __FreeBSD_version >= 1000030
#define AFS_VM_OBJECT_WLOCK(o)	VM_OBJECT_WLOCK(o)
#define AFS_VM_OBJECT_WUNLOCK(o)	VM_OBJECT_WUNLOCK(o)
#else
#define AFS_VM_OBJECT_WLOCK(o)	VM_OBJECT_LOCK(o)
#define AFS_VM_OBJECT_WUNLOCK(o)	VM_OBJECT_UNLOCK(o)
#endif
70
/* Try to discard pages, in order to recycle a vcache entry.
 *
 * We also make some sanity checks: ref count, open count, held locks.
 *
 * We also do some non-VM-related chores, such as releasing the cred pointer
 * (for AIX and Solaris) and releasing the gnode (for AIX).
 *
 * Locking: afs_xvcache lock is held. It must not be dropped.
 *
 * Returns 0 on success, or an errno (EBUSY / whatever
 * osi_fbsd_checkinuse() reports) when the vnode cannot be flushed now.
 */
int
osi_VM_FlushVCache(struct vcache *avc)
{
    struct vnode *vp;
    int code;

    vp = AFSTOV(avc);

    /* Take the vnode interlock without sleeping: afs_xvcache is held, so
     * blocking here for the interlock could invert lock order against
     * other threads.  If it is contended, just report busy. */
    if (!VI_TRYLOCK(vp))
	return EBUSY;
    /* In-use sanity checks (per the comment above: ref count, open count,
     * held locks) are done under the interlock. */
    code = osi_fbsd_checkinuse(avc);
    if (code) {
	VI_UNLOCK(vp);
	return code;
    }

    /* must hold the vnode before calling cache_purge()
     * This code largely copied from vfs_subr.c:vlrureclaim() */
    vholdl(vp);		/* hold taken while the interlock is still owned */
    VI_UNLOCK(vp);

    /* Drop the AFS global lock around cache_purge(); the vhold keeps the
     * vnode from being recycled in the meantime. */
    AFS_GUNLOCK();
    cache_purge(vp);
    AFS_GLOCK();

    vdrop(vp);

    return 0;
}
110
/* Try to store pages to cache, in order to store a file back to the server.
 *
 * Locking: the vcache entry's lock is held. It will usually be dropped and
 * re-obtained.
 */
void
osi_VM_StoreAllSegments(struct vcache *avc)
{
    struct vnode *vp;
    struct vm_object *obj;
    int anyio, tries;

    /* Drop the vcache lock and the AFS global lock before touching the VM
     * system; they are reacquired (in the reverse order) before return. */
    ReleaseWriteLock(&avc->lock);
    AFS_GUNLOCK();
    tries = 5;
    vp = AFSTOV(avc);

    /*
     * I don't understand this. Why not just call vm_object_page_clean()
     * and be done with it? I particularly don't understand why we're calling
     * vget() here. Is there some reason to believe that the vnode might
     * be being recycled at this point? I don't think there's any need for
     * this loop, either -- if we keep the vnode locked all the time,
     * that and the object lock will prevent any new pages from appearing.
     * The loop is what causes the race condition. -GAW
     */
    do {
	anyio = 0;

	/* Only bother if the object has (or might have) dirty pages;
	 * OBJ_MIGHTBEDIRTY is checked unlocked here — a racy read, retried
	 * by the surrounding loop. */
	obj = vp->v_object;
	if (obj != NULL && obj->flags & OBJ_MIGHTBEDIRTY) {
	    /* vget() takes a reference and the exclusive vnode lock;
	     * it fails if the vnode is being recycled. */
	    if (!vget(vp, LK_EXCLUSIVE | LK_RETRY, curthread)) {
		/* Re-read v_object: it may have changed while unlocked. */
		obj = vp->v_object;
		if (obj != NULL) {
		    /* Synchronously write out all dirty pages (0,0 = the
		     * whole object range). */
		    AFS_VM_OBJECT_WLOCK(obj);
		    vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
		    AFS_VM_OBJECT_WUNLOCK(obj);
		    anyio = 1;
		}
		vput(vp);
	    }
	}
    } while (anyio && (--tries > 0));	/* retry (bounded) while work was done */
    AFS_GLOCK();
    ObtainWriteLock(&avc->lock, 94);
}
157
/* Try to invalidate pages, for "fs flush" or "fs flushv"; or
 * try to free pages, when deleting a file.
 *
 * Locking: the vcache entry's lock is held. It may be dropped and
 * re-obtained.
 *
 * Since we drop and re-obtain the lock, we can't guarantee that there won't
 * be some pages around when we return, newly created by concurrent activity.
 */
void
osi_VM_TryToSmush(struct vcache *avc, afs_ucred_t *acred, int sync)
{
    struct vnode *vp;
    int tries, code;
    int islocked;	/* lock state on entry, restored on exit */

    vp = AFSTOV(avc);

    /* A doomed vnode is being reclaimed; nothing to flush. */
    VI_LOCK(vp);
    if (vp->v_iflag & VI_DOOMED) {
	VI_UNLOCK(vp);
	return;
    }
    VI_UNLOCK(vp);

    /* We need the vnode exclusively locked.  Record how it was locked on
     * entry so the same state can be restored before returning. */
    islocked = islocked_vnode(vp);
    if (islocked == LK_EXCLOTHER)
	panic("Trying to Smush over someone else's lock");
    else if (islocked == LK_SHARED) {
	afs_warn("Trying to Smush with a shared lock");
	lock_vnode(vp, LK_UPGRADE);
    } else if (!islocked)
	lock_vnode(vp, LK_EXCLUSIVE);

    if (vp->v_bufobj.bo_object != NULL) {
	AFS_VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
	/*
	 * Do we really want OBJPC_SYNC? OBJPC_INVAL would be
	 * faster, if invalidation is really what we are being
	 * asked to do. (It would make more sense, too, since
	 * otherwise this function is practically identical to
	 * osi_VM_StoreAllSegments().) -GAW
	 */

	/*
	 * Dunno. We no longer resemble osi_VM_StoreAllSegments,
	 * though maybe that's wrong, now. And OBJPC_SYNC is the
	 * common thing in 70 file systems, it seems. Matt.
	 */

	/* Synchronously write out dirty pages over the whole object. */
	vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
	AFS_VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
    }

    /* Flush/invalidate the buffer cache (V_SAVE writes dirty buffers
     * first); retry a bounded number of times on failure. */
    tries = 5;
    code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0);
    while (code && (tries > 0)) {
	afs_warn("TryToSmush retrying vinvalbuf");
	code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0);
	--tries;
    }
    /* Restore the entry lock state: downgrade if it was shared, release
     * if it was unlocked; leave it alone if it was already exclusive. */
    if (islocked == LK_SHARED)
	lock_vnode(vp, LK_DOWNGRADE);
    else if (!islocked)
	unlock_vnode(vp);
}
224
225/* Purge VM for a file when its callback is revoked.
226 *
227 * Locking: No lock is held, not even the global lock.
228 */
229void
230osi_VM_FlushPages(struct vcache *avc, afs_ucred_t *credp)
231{
232 struct vnode *vp;
233 struct vm_object *obj;
234
235 vp = AFSTOV(avc);
236 ASSERT_VOP_LOCKED(vp, __func__);
237 obj = vp->v_object;
238 if (obj != NULL) {
239 AFS_VM_OBJECT_WLOCK(obj);
240 vm_object_page_remove(obj, 0, 0, FALSE);
241 AFS_VM_OBJECT_WUNLOCK(obj);
242 }
243 osi_vinvalbuf(vp, 0, 0, 0);
244}
245
246/* Purge pages beyond end-of-file, when truncating a file.
247 *
248 * Locking: no lock is held, not even the global lock.
249 * activeV is raised. This is supposed to block pageins, but at present
250 * it only works on Solaris.
251 */
252void
253osi_VM_Truncate(struct vcache *avc, int alen, afs_ucred_t *acred)
254{
255 vnode_pager_setsize(AFSTOV(avc), alen);
256}