/* OpenAFS cache manager: write, write-on-close, and fsync path. */
1 | /* |
2 | * Copyright 2000, International Business Machines Corporation and others. | |
3 | * All Rights Reserved. | |
4 | * | |
5 | * This software has been released under the terms of the IBM Public | |
6 | * License. For details, see the LICENSE file in the top-level source | |
7 | * directory or online at http://www.openafs.org/dl/license10.html | |
8 | */ | |
9 | ||
10 | /* | |
11 | * Implements: | |
12 | * afs_write | |
13 | * afs_UFSWriteUIO | |
14 | * afs_StoreOnLastReference | |
15 | * afs_close | |
16 | * afs_fsync | |
17 | */ | |
18 | ||
19 | #include <afsconfig.h> | |
20 | #include "afs/param.h" | |
21 | ||
22 | ||
23 | #include "afs/sysincludes.h" /* Standard vendor system headers */ | |
24 | #include "afsincludes.h" /* Afs-based standard headers */ | |
25 | #include "afs/afs_stats.h" /* statistics */ | |
26 | #include "afs/afs_cbqueue.h" | |
27 | #include "afs/nfsclient.h" | |
28 | #include "afs/afs_osidnlc.h" | |
29 | ||
30 | ||
31 | extern unsigned char *afs_indexFlags; | |
32 | ||
33 | /* Called by all write-on-close routines: regular afs_close, | |
34 | * store via background daemon and store via the | |
35 | * afs_FlushActiveVCaches routine (when CCORE is on). | |
36 | * avc->lock must be write-locked. | |
37 | */ | |
/*!
 * Flush dirty data back when the last writer reference is dropped.
 *
 * Stores all dirty chunks to the fileserver (or, when disconnected with
 * read-write support, queues the vcache for later replay), then drops the
 * opens/execsOrWriters counts held by the caller.
 *
 * \param avc   vcache being closed; caller must hold its write lock
 *              (see the comment above this function).
 * \param treq  request structure used for the store RPCs.
 *
 * \return 0 on success, or the error from afs_StoreAllSegments.
 */
int
afs_StoreOnLastReference(struct vcache *avc,
			 struct vrequest *treq)
{
    int code = 0;

    AFS_STATCNT(afs_StoreOnLastReference);
    /* if CCore flag is set, we clear it and do the extra decrement
     * ourselves now. If we're called by the CCore clearer, the CCore
     * flag will already be clear, so we don't have to worry about
     * clearing it twice. */
    if (avc->f.states & CCore) {
	afs_ucred_t *cred;

	avc->f.states &= ~CCore;
#if defined(AFS_SGI_ENV)
	osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
#endif
	/* WARNING: Our linux cm code treats the execsOrWriters counter differently
	 * depending on the flags the file was opened with. So, if you make any
	 * changes to the way the execsOrWriters flag is handled check with the
	 * top level code. */
	avc->opens--;
	avc->execsOrWriters--;
	AFS_RELE(AFSTOV(avc));	/* VN_HOLD at set CCore(afs_FakeClose) */
	cred = (afs_ucred_t *)avc->linkData;	/* "crheld" in afs_FakeClose */
	crfree(cred);
	avc->linkData = NULL;
    }

    if (!AFS_IS_DISCONNECTED) {
	/* Connected. */

	/* Now, send the file back. Used to require 0 writers left, but now do
	 * it on every close for write, since two closes in a row are harmless
	 * since first will clean all chunks, and second will be noop. Note that
	 * this will also save confusion when someone keeps a file open
	 * inadvertently, since with old system, writes to the server would never
	 * happen again.
	 */
	code = afs_StoreAllSegments(avc, treq, AFS_LASTSTORE /*!sync-to-disk */ );
	/*
	 * We have to do these after the above store in done: in some systems
	 * like aix they'll need to flush all the vm dirty pages to the disk via
	 * the strategy routine. During that all procedure (done under no avc
	 * locks) opens, refcounts would be zero, since it didn't reach the
	 * afs_{rd,wr} routines which means the vcache is a perfect candidate
	 * for flushing!
	 */
    } else if (AFS_IS_DISCON_RW) {
	/* Disconnected with read-write support: remember the dirty close
	 * so it can be replayed on reconnection. */
	afs_DisconAddDirty(avc, VDisconWriteClose, 0);
    }	/* if not disconnected */

#if defined(AFS_SGI_ENV)
    osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
#endif

    /* Drop the counts the caller held for this writer. */
    avc->opens--;
    avc->execsOrWriters--;
    return code;
}
99 | ||
100 | int | |
101 | afs_UFSWriteUIO(struct vcache *avc, afs_dcache_id_t *inode, struct uio *tuiop) | |
102 | { | |
103 | struct osi_file *tfile; | |
104 | int code; | |
105 | ||
106 | tfile = (struct osi_file *)osi_UFSOpen(inode); | |
107 | if (!tfile) | |
108 | return -1; | |
109 | ||
110 | #if defined(AFS_AIX41_ENV) | |
111 | AFS_GUNLOCK(); | |
112 | code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, tuiop, NULL, NULL, | |
113 | NULL, afs_osi_credp); | |
114 | AFS_GLOCK(); | |
115 | #elif defined(AFS_AIX32_ENV) | |
116 | code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, tuiop, NULL, NULL); | |
117 | #elif defined(AFS_AIX_ENV) | |
118 | code = VNOP_RDWR(tfile->vnode, UIO_WRITE, FWRITE, (off_t) &offset, | |
119 | tuiop, NULL, NULL, -1); | |
120 | #elif defined(AFS_SUN5_ENV) | |
121 | AFS_GUNLOCK(); | |
122 | # ifdef AFS_SUN510_ENV | |
123 | VOP_RWLOCK(tfile->vnode, 1, NULL); | |
124 | code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp, NULL); | |
125 | VOP_RWUNLOCK(tfile->vnode, 1, NULL); | |
126 | # else | |
127 | VOP_RWLOCK(tfile->vnode, 1); | |
128 | code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp); | |
129 | VOP_RWUNLOCK(tfile->vnode, 1); | |
130 | # endif | |
131 | AFS_GLOCK(); | |
132 | if (code == ENOSPC) | |
133 | afs_WarnENOSPC(); | |
134 | #elif defined(AFS_SGI_ENV) | |
135 | AFS_GUNLOCK(); | |
136 | avc->f.states |= CWritingUFS; | |
137 | AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_WRITE); | |
138 | AFS_VOP_WRITE(tfile->vnode, tuiop, IO_ISLOCKED, afs_osi_credp, code); | |
139 | AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_WRITE); | |
140 | avc->f.states &= ~CWritingUFS; | |
141 | AFS_GLOCK(); | |
142 | #elif defined(AFS_HPUX100_ENV) | |
143 | { | |
144 | AFS_GUNLOCK(); | |
145 | code = VOP_RDWR(tfile->vnode, tuiop, UIO_WRITE, 0, afs_osi_credp); | |
146 | AFS_GLOCK(); | |
147 | } | |
148 | #elif defined(AFS_LINUX20_ENV) | |
149 | AFS_GUNLOCK(); | |
150 | code = osi_rdwr(tfile, tuiop, UIO_WRITE); | |
151 | AFS_GLOCK(); | |
152 | #elif defined(AFS_DARWIN80_ENV) | |
153 | AFS_GUNLOCK(); | |
154 | code = VNOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_ctxtp); | |
155 | AFS_GLOCK(); | |
156 | #elif defined(AFS_DARWIN_ENV) | |
157 | AFS_GUNLOCK(); | |
158 | VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc()); | |
159 | code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp); | |
160 | VOP_UNLOCK(tfile->vnode, 0, current_proc()); | |
161 | AFS_GLOCK(); | |
162 | #elif defined(AFS_FBSD80_ENV) | |
163 | AFS_GUNLOCK(); | |
164 | VOP_LOCK(tfile->vnode, LK_EXCLUSIVE); | |
165 | code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp); | |
166 | VOP_UNLOCK(tfile->vnode, 0); | |
167 | AFS_GLOCK(); | |
168 | #elif defined(AFS_FBSD_ENV) | |
169 | AFS_GUNLOCK(); | |
170 | VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curthread); | |
171 | code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp); | |
172 | VOP_UNLOCK(tfile->vnode, 0, curthread); | |
173 | AFS_GLOCK(); | |
174 | #elif defined(AFS_NBSD_ENV) | |
175 | AFS_GUNLOCK(); | |
176 | VOP_LOCK(tfile->vnode, LK_EXCLUSIVE); | |
177 | code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp); | |
178 | #if defined(AFS_NBSD60_ENV) | |
179 | VOP_UNLOCK(tfile->vnode); | |
180 | #else | |
181 | VOP_UNLOCK(tfile->vnode, 0); | |
182 | #endif | |
183 | AFS_GLOCK(); | |
184 | #elif defined(AFS_XBSD_ENV) | |
185 | AFS_GUNLOCK(); | |
186 | VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc); | |
187 | code = VOP_WRITE(tfile->vnode, tuiop, 0, afs_osi_credp); | |
188 | VOP_UNLOCK(tfile->vnode, 0, curproc); | |
189 | AFS_GLOCK(); | |
190 | #else | |
191 | # ifdef AFS_HPUX_ENV | |
192 | tuio.uio_fpflags &= ~FSYNCIO; /* don't do sync io */ | |
193 | # endif | |
194 | code = VOP_RDWR(tfile->vnode, tuiop, UIO_WRITE, 0, afs_osi_credp); | |
195 | #endif | |
196 | osi_UFSClose(tfile); | |
197 | ||
198 | return code; | |
199 | } | |
200 | ||
/*!
 * Common write entry point for the cache manager.
 *
 * Copies the data described by \a auio into cache chunks (dcaches),
 * chunk by chunk, via the cache type's vwriteUIO method, extending the
 * file length and chunk sizes as it goes.  The actual store back to the
 * fileserver happens later (afs_DoPartialWrite here when dirty-chunk
 * pressure is high, or afs_StoreOnLastReference at close).
 *
 * \param avc    vcache being written.
 * \param auio   uio describing the source data; its offset/resid are
 *               consumed as data is transferred.
 * \param aio    IO flags from the caller (IO_APPEND, IO_SYNC/FSYNC, ...).
 * \param acred  caller's credentials.
 * \param noLock nonzero if the caller already holds avc->lock; also
 *               gates the fsync-on-sync-write behavior at the bottom.
 *
 * \return 0 on success, otherwise an afs_CheckCode-translated error.
 */
int
afs_write(struct vcache *avc, struct uio *auio, int aio,
	  afs_ucred_t *acred, int noLock)
{
    afs_size_t totalLength;
    afs_size_t transferLength;
    afs_size_t filePos;
    afs_size_t offset, len;
    afs_int32 tlen;
    afs_int32 trimlen;
    afs_int32 startDate;
    afs_int32 max;
    struct dcache *tdc;
#ifdef _HIGHC_
    volatile
#endif
    afs_int32 error;
#if defined(AFS_FBSD_ENV) || defined(AFS_DFBSD_ENV)
    struct vnode *vp = AFSTOV(avc);
#endif
    struct uio *tuiop = NULL;
    afs_int32 code;
    struct vrequest *treq = NULL;

    AFS_STATCNT(afs_write);

    /* Fail fast on a sticky vcache error or a hard-disconnected client. */
    if (avc->vc_error)
	return avc->vc_error;

    if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW)
	return ENETDOWN;

    startDate = osi_Time();
    if ((code = afs_CreateReq(&treq, acred)))
	return code;
    /* otherwise we read */
    totalLength = AFS_UIO_RESID(auio);
    filePos = AFS_UIO_OFFSET(auio);
    error = 0;
    transferLength = 0;
    afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
	       ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
	       ICL_HANDLE_OFFSET(avc->f.m.Length));
    if (!noLock) {
	afs_MaybeWakeupTruncateDaemon();
	ObtainWriteLock(&avc->lock, 556);
    }
#if defined(AFS_SGI_ENV)
    {
	off_t diff;
	/*
	 * afs_xwrite handles setting m.Length
	 * and handles APPEND mode.
	 * Since we are called via strategy, we need to trim the write to
	 * the actual size of the file
	 */
	osi_Assert(filePos <= avc->f.m.Length);
	diff = avc->f.m.Length - filePos;
	AFS_UIO_SETRESID(auio, MIN(totalLength, diff));
	totalLength = AFS_UIO_RESID(auio);
    }
#else
    if (aio & IO_APPEND) {
	/* append mode, start it at the right spot */
#if defined(AFS_SUN5_ENV)
	auio->uio_loffset = 0;
#endif
	filePos = avc->f.m.Length;
	AFS_UIO_SETOFFSET(auio, avc->f.m.Length);
    }
#endif
    /*
     * Note that we use startDate rather than calling osi_Time() here.
     * This is to avoid counting lock-waiting time in file date (for ranlib).
     */
    avc->f.m.Date = startDate;

#if defined(AFS_HPUX_ENV)
    /* Enforce the process file-size resource limit (in 512-byte blocks). */
#if defined(AFS_HPUX101_ENV)
    if ((totalLength + filePos) >> 9 >
	p_rlimit(u.u_procp)[RLIMIT_FSIZE].rlim_cur) {
#else
    if ((totalLength + filePos) >> 9 > u.u_rlimit[RLIMIT_FSIZE].rlim_cur) {
#endif
	if (!noLock)
	    ReleaseWriteLock(&avc->lock);
	afs_DestroyReq(treq);
	return (EFBIG);
    }
#endif
#if defined(AFS_VM_RDWR_ENV) && !defined(AFS_FAKEOPEN_ENV)
    /*
     * If write is implemented via VM, afs_FakeOpen() is called from the
     * high-level write op.
     */
    if (avc->execsOrWriters <= 0) {
	afs_warn("WARNING: afs_ufswr vcp=%lx, exOrW=%d\n", (unsigned long)avc,
		 avc->execsOrWriters);
    }
#else
    afs_FakeOpen(avc);
#endif
    avc->f.states |= CDirty;

    /* Main loop: transfer the data one cache chunk at a time. */
    while (totalLength > 0) {
	tdc = afs_ObtainDCacheForWriting(avc, filePos, totalLength, treq,
					 noLock);
	if (!tdc) {
	    error = EIO;
	    break;
	}
	len = totalLength;	/* write this amount by default */
	offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
	max = AFS_CHUNKTOSIZE(tdc->f.chunk);	/* max size of this chunk */
	if (max <= len + offset) {	/*if we'd go past the end of this chunk */
	    /* it won't all fit in this chunk, so write as much
	     * as will fit */
	    len = max - offset;
	}

	if (tuiop)
	    afsio_free(tuiop);
	trimlen = len;
	tuiop = afsio_partialcopy(auio, trimlen);
	AFS_UIO_SETOFFSET(tuiop, offset);

	code = (*(afs_cacheType->vwriteUIO))(avc, &tdc->f.inode, tuiop);

	if (code) {
	    void *cfile;

	    /* The chunk is now in an unknown state; zap it and truncate
	     * the backing cache file so it is not treated as valid data. */
	    error = code;
	    ZapDCE(tdc);	/* bad data */
	    cfile = afs_CFileOpen(&tdc->f.inode);
	    osi_Assert(cfile);
	    afs_CFileTruncate(cfile, 0);
	    afs_CFileClose(cfile);
	    afs_AdjustSize(tdc, 0);	/* sets f.chunkSize to 0 */

	    afs_stats_cmperf.cacheCurrDirtyChunks--;
	    afs_indexFlags[tdc->index] &= ~IFDataMod;	/* so it does disappear */
	    ReleaseWriteLock(&tdc->lock);
	    afs_PutDCache(tdc);
	    break;
	}
	/* otherwise we've written some, fixup length, etc and continue with next seg */
	len = len - AFS_UIO_RESID(tuiop);	/* compute amount really transferred */
	tlen = len;
	afsio_skip(auio, tlen);	/* advance auio over data written */
	/* compute new file size */
	if (offset + len > tdc->f.chunkBytes) {
	    afs_int32 tlength = offset + len;
	    afs_AdjustSize(tdc, tlength);
	    if (tdc->validPos < filePos + len)
		tdc->validPos = filePos + len;
	}
	totalLength -= len;
	transferLength += len;
	filePos += len;
#if defined(AFS_SGI_ENV)
	/* afs_xwrite handles setting m.Length */
	osi_Assert(filePos <= avc->f.m.Length);
#else
	if (filePos > avc->f.m.Length) {
	    /* The write extended the file; record the new length. */
	    if (AFS_IS_DISCON_RW)
		afs_PopulateDCache(avc, filePos, treq);
	    afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
		       __FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
		       ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_OFFSET,
		       ICL_HANDLE_OFFSET(filePos));
	    avc->f.m.Length = filePos;
#if defined(AFS_FBSD_ENV) || defined(AFS_DFBSD_ENV)
	    vnode_pager_setsize(vp, filePos);
#endif
	}
#endif
	ReleaseWriteLock(&tdc->lock);
	afs_PutDCache(tdc);
#if !defined(AFS_VM_RDWR_ENV)
	/*
	 * If write is implemented via VM, afs_DoPartialWrite() is called from
	 * the high-level write op.
	 */
	if (!noLock) {
	    code = afs_DoPartialWrite(avc, treq);
	    if (code) {
		error = code;
		break;
	    }
	}
#endif
    }
#if !defined(AFS_VM_RDWR_ENV) || defined(AFS_FAKEOPEN_ENV)
    afs_FakeClose(avc, acred);
#endif
    error = afs_CheckCode(error, treq, 7);
    /* This set is here so we get the CheckCode. */
    if (error && !avc->vc_error)
	avc->vc_error = error;
    if (!noLock)
	ReleaseWriteLock(&avc->lock);
    if (tuiop)
	afsio_free(tuiop);

#ifndef AFS_VM_RDWR_ENV
    /*
     * If write is implemented via VM, afs_fsync() is called from the high-level
     * write op.
     */
#if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
    if (noLock && (aio & IO_SYNC)) {
#else
#ifdef AFS_HPUX_ENV
    /* On hpux on synchronous writes syncio will be set to IO_SYNC. If
     * we're doing them because the file was opened with O_SYNCIO specified,
     * we have to look in the u area. No single mechanism here!!
     */
    if (noLock && ((aio & IO_SYNC) | (auio->uio_fpflags & FSYNCIO))) {
#else
    if (noLock && (aio & FSYNC)) {
#endif
#endif
	if (!AFS_NFSXLATORREQ(acred))
	    afs_fsync(avc, acred);
    }
#endif
    afs_DestroyReq(treq);
    return error;
}
432 | ||
433 | /* do partial write if we're low on unmodified chunks */ | |
434 | int | |
435 | afs_DoPartialWrite(struct vcache *avc, struct vrequest *areq) | |
436 | { | |
437 | afs_int32 code; | |
438 | ||
439 | if (afs_stats_cmperf.cacheCurrDirtyChunks <= | |
440 | afs_stats_cmperf.cacheMaxDirtyChunks | |
441 | || AFS_IS_DISCONNECTED) | |
442 | return 0; /* nothing to do */ | |
443 | /* otherwise, call afs_StoreDCache (later try to do this async, if possible) */ | |
444 | afs_Trace2(afs_iclSetp, CM_TRACE_PARTIALWRITE, ICL_TYPE_POINTER, avc, | |
445 | ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length)); | |
446 | ||
447 | #if defined(AFS_SUN5_ENV) | |
448 | code = afs_StoreAllSegments(avc, areq, AFS_ASYNC | AFS_VMSYNC_INVAL); | |
449 | #else | |
450 | code = afs_StoreAllSegments(avc, areq, AFS_ASYNC); | |
451 | #endif | |
452 | return code; | |
453 | } | |
454 | ||
455 | /* handle any closing cleanup stuff */ | |
456 | int | |
457 | #if defined(AFS_SGI65_ENV) | |
458 | afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose, | |
459 | afs_ucred_t *acred) | |
460 | #elif defined(AFS_SGI64_ENV) | |
461 | afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose, | |
462 | off_t offset, afs_ucred_t *acred, struct flid *flp) | |
463 | #elif defined(AFS_SGI_ENV) | |
464 | afs_close(OSI_VC_DECL(avc), afs_int32 aflags, lastclose_t lastclose | |
465 | off_t offset, afs_ucred_t *acred) | |
466 | #elif defined(AFS_SUN5_ENV) | |
467 | afs_close(OSI_VC_DECL(avc), afs_int32 aflags, int count, offset_t offset, | |
468 | afs_ucred_t *acred) | |
469 | #else | |
470 | afs_close(OSI_VC_DECL(avc), afs_int32 aflags, afs_ucred_t *acred) | |
471 | #endif | |
472 | { | |
473 | afs_int32 code; | |
474 | afs_int32 code_checkcode = 0; | |
475 | struct brequest *tb; | |
476 | struct vrequest *treq = NULL; | |
477 | #ifdef AFS_SGI65_ENV | |
478 | struct flid flid; | |
479 | #endif | |
480 | struct afs_fakestat_state fakestat; | |
481 | OSI_VC_CONVERT(avc); | |
482 | ||
483 | AFS_STATCNT(afs_close); | |
484 | afs_Trace2(afs_iclSetp, CM_TRACE_CLOSE, ICL_TYPE_POINTER, avc, | |
485 | ICL_TYPE_INT32, aflags); | |
486 | code = afs_CreateReq(&treq, acred); | |
487 | if (code) | |
488 | return code; | |
489 | afs_InitFakeStat(&fakestat); | |
490 | code = afs_EvalFakeStat(&avc, &fakestat, treq); | |
491 | if (code) { | |
492 | afs_PutFakeStat(&fakestat); | |
493 | afs_DestroyReq(treq); | |
494 | return code; | |
495 | } | |
496 | AFS_DISCON_LOCK(); | |
497 | #ifdef AFS_SUN5_ENV | |
498 | if (avc->flockCount) { | |
499 | HandleFlock(avc, LOCK_UN, treq, 0, 1 /*onlymine */ ); | |
500 | } | |
501 | #endif | |
502 | #if defined(AFS_SGI_ENV) | |
503 | if (!lastclose) { | |
504 | afs_PutFakeStat(&fakestat); | |
505 | AFS_DISCON_UNLOCK(); | |
506 | afs_DestroyReq(treq); | |
507 | return 0; | |
508 | } | |
509 | /* unlock any locks for pid - could be wrong for child .. */ | |
510 | AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE); | |
511 | # ifdef AFS_SGI65_ENV | |
512 | get_current_flid(&flid); | |
513 | cleanlocks((vnode_t *) avc, flid.fl_pid, flid.fl_sysid); | |
514 | HandleFlock(avc, LOCK_UN, treq, flid.fl_pid, 1 /*onlymine */ ); | |
515 | # else | |
516 | # ifdef AFS_SGI64_ENV | |
517 | cleanlocks((vnode_t *) avc, flp); | |
518 | # else /* AFS_SGI64_ENV */ | |
519 | cleanlocks((vnode_t *) avc, u.u_procp->p_epid, u.u_procp->p_sysid); | |
520 | # endif /* AFS_SGI64_ENV */ | |
521 | HandleFlock(avc, LOCK_UN, treq, OSI_GET_CURRENT_PID(), 1 /*onlymine */ ); | |
522 | # endif /* AFS_SGI65_ENV */ | |
523 | /* afs_chkpgoob will drop and re-acquire the global lock. */ | |
524 | afs_chkpgoob(&avc->v, btoc(avc->f.m.Length)); | |
525 | #elif defined(AFS_SUN5_ENV) | |
526 | if (count > 1) { | |
527 | /* The vfs layer may call this repeatedly with higher "count"; only | |
528 | * on the last close (i.e. count = 1) we should actually proceed | |
529 | * with the close. */ | |
530 | afs_PutFakeStat(&fakestat); | |
531 | AFS_DISCON_UNLOCK(); | |
532 | afs_DestroyReq(treq); | |
533 | return 0; | |
534 | } | |
535 | #else | |
536 | if (avc->flockCount) { /* Release Lock */ | |
537 | HandleFlock(avc, LOCK_UN, treq, 0, 1 /*onlymine */ ); | |
538 | } | |
539 | #endif | |
540 | if (aflags & (FWRITE | FTRUNC)) { | |
541 | if (afs_BBusy() || (AFS_NFSXLATORREQ(acred)) || AFS_IS_DISCONNECTED) { | |
542 | /* do it yourself if daemons are all busy */ | |
543 | ObtainWriteLock(&avc->lock, 124); | |
544 | code = afs_StoreOnLastReference(avc, treq); | |
545 | ReleaseWriteLock(&avc->lock); | |
546 | #if defined(AFS_SGI_ENV) | |
547 | AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE); | |
548 | #endif | |
549 | } else { | |
550 | #if defined(AFS_SGI_ENV) | |
551 | AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE); | |
552 | #endif | |
553 | /* at least one daemon is idle, so ask it to do the store. | |
554 | * Also, note that we don't lock it any more... */ | |
555 | tb = afs_BQueue(BOP_STORE, avc, 0, 1, acred, | |
556 | (afs_size_t) afs_cr_uid(acred), (afs_size_t) 0, | |
557 | (void *)0, (void *)0, (void *)0); | |
558 | /* sleep waiting for the store to start, then retrieve error code */ | |
559 | while ((tb->flags & BUVALID) == 0) { | |
560 | tb->flags |= BUWAIT; | |
561 | afs_osi_Sleep(tb); | |
562 | } | |
563 | code = tb->code_raw; | |
564 | code_checkcode = tb->code_checkcode; | |
565 | afs_BRelease(tb); | |
566 | } | |
567 | ||
568 | /* VNOVNODE is "acceptable" error code from close, since | |
569 | * may happen when deleting a file on another machine while | |
570 | * it is open here. */ | |
571 | if (code == VNOVNODE) | |
572 | code = 0; | |
573 | ||
574 | /* Ensure last closer gets the error. If another thread caused | |
575 | * DoPartialWrite and this thread does not actually store the data, | |
576 | * it may not see the quota error. | |
577 | */ | |
578 | ObtainWriteLock(&avc->lock, 406); | |
579 | if (avc->vc_error) { | |
580 | #ifdef AFS_AIX32_ENV | |
581 | osi_ReleaseVM(avc, acred); | |
582 | #endif | |
583 | /* We don't know what the original raw error code was, so set | |
584 | * 'code' to 0. But we have the afs_CheckCode-translated error | |
585 | * code, so put that in code_checkcode. We cannot just set code | |
586 | * to avc->vc_error, since vc_error is a checkcode-translated | |
587 | * error code, and 'code' is supposed to be a raw error code. */ | |
588 | code = 0; | |
589 | code_checkcode = avc->vc_error; | |
590 | avc->vc_error = 0; | |
591 | } | |
592 | ReleaseWriteLock(&avc->lock); | |
593 | ||
594 | /* some codes merit specific complaint */ | |
595 | if (code < 0) { | |
596 | afs_warnuser("afs: failed to store file (network problems)\n"); | |
597 | } | |
598 | #ifdef AFS_SUN5_ENV | |
599 | else if (code == ENOSPC || code_checkcode == ENOSPC) { | |
600 | afs_warnuser | |
601 | ("afs: failed to store file (over quota or partition full)\n"); | |
602 | } | |
603 | #else | |
604 | else if (code == ENOSPC || code_checkcode == ENOSPC) { | |
605 | afs_warnuser("afs: failed to store file (partition full)\n"); | |
606 | } else if (code == EDQUOT || code_checkcode == EDQUOT) { | |
607 | afs_warnuser("afs: failed to store file (over quota)\n"); | |
608 | } | |
609 | #endif | |
610 | else if (code || code_checkcode) | |
611 | afs_warnuser("afs: failed to store file (%d/%d)\n", code, code_checkcode); | |
612 | ||
613 | /* finally, we flush any text pages lying around here */ | |
614 | hzero(avc->flushDV); | |
615 | osi_FlushText(avc); | |
616 | } else { | |
617 | #if defined(AFS_SGI_ENV) | |
618 | AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE); | |
619 | osi_Assert(avc->opens > 0); | |
620 | #endif | |
621 | /* file open for read */ | |
622 | ObtainWriteLock(&avc->lock, 411); | |
623 | if (avc->vc_error) { | |
624 | #ifdef AFS_AIX32_ENV | |
625 | osi_ReleaseVM(avc, acred); | |
626 | #endif | |
627 | code = 0; | |
628 | code_checkcode = avc->vc_error; | |
629 | avc->vc_error = 0; | |
630 | } | |
631 | #if defined(AFS_FBSD80_ENV) | |
632 | /* XXX */ | |
633 | if (!avc->opens) { | |
634 | afs_int32 opens, is_free, is_gone, is_doomed, iflag; | |
635 | struct vnode *vp = AFSTOV(avc); | |
636 | VI_LOCK(vp); | |
637 | is_doomed = vp->v_iflag & VI_DOOMED; | |
638 | is_free = vp->v_iflag & VI_FREE; | |
639 | is_gone = vp->v_iflag & VI_DOINGINACT; | |
640 | iflag = vp->v_iflag; | |
641 | VI_UNLOCK(vp); | |
642 | opens = avc->opens; | |
643 | afs_warn("afs_close avc %p vp %p opens %d free %d doinginact %d doomed %d iflag %d\n", | |
644 | avc, vp, opens, is_free, is_gone, is_doomed, iflag); | |
645 | } | |
646 | #endif | |
647 | avc->opens--; | |
648 | ReleaseWriteLock(&avc->lock); | |
649 | } | |
650 | AFS_DISCON_UNLOCK(); | |
651 | afs_PutFakeStat(&fakestat); | |
652 | ||
653 | if (code_checkcode) { | |
654 | code = code_checkcode; | |
655 | } else { | |
656 | code = afs_CheckCode(code, treq, 5); | |
657 | } | |
658 | afs_DestroyReq(treq); | |
659 | return code; | |
660 | } | |
661 | ||
662 | ||
/*!
 * fsync for AFS files: store all dirty segments synchronously.
 *
 * If there are active writers/executors, pushes all dirty chunks back
 * to the fileserver (AFS_SYNC), or queues the vcache for replay when
 * running disconnected with read-write support.  Returns any sticky
 * avc->vc_error immediately without doing work.
 *
 * \return 0 on success, or a checkcode-translated error.
 */
int
#if defined(AFS_SGI_ENV) || defined(AFS_SUN5_ENV)
afs_fsync(OSI_VC_DECL(avc), int flag, afs_ucred_t *acred
# ifdef AFS_SGI65_ENV
	  , off_t start, off_t stop
# endif /* AFS_SGI65_ENV */
    )
#else /* !SUN5 && !SGI */
afs_fsync(OSI_VC_DECL(avc), afs_ucred_t *acred)
#endif
{
    afs_int32 code;
    struct vrequest *treq = NULL;
    OSI_VC_CONVERT(avc);

    if (avc->vc_error)
	return avc->vc_error;

#if defined(AFS_SUN5_ENV)
    /* back out if called from NFS server */
    if (curthread->t_flag & T_DONTPEND)
	return 0;
#endif

    AFS_STATCNT(afs_fsync);
    afs_Trace1(afs_iclSetp, CM_TRACE_FSYNC, ICL_TYPE_POINTER, avc);
    if ((code = afs_CreateReq(&treq, acred)))
	return code;
    AFS_DISCON_LOCK();
#if defined(AFS_SGI_ENV)
    AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
    if (flag & FSYNC_INVAL)
	osi_VM_FSyncInval(avc);
#endif /* AFS_SGI_ENV */

    /* Take the lock shared; upgrade to write only for the store itself. */
    ObtainSharedLock(&avc->lock, 18);
    code = 0;
    if (avc->execsOrWriters > 0) {
	if (!AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
	    /* Your average flush. */

	    /* put the file back */
	    UpgradeSToWLock(&avc->lock, 41);
	    code = afs_StoreAllSegments(avc, treq, AFS_SYNC);
	    ConvertWToSLock(&avc->lock);
	} else {
	    UpgradeSToWLock(&avc->lock, 711);
	    afs_DisconAddDirty(avc, VDisconWriteFlush, 1);
	    ConvertWToSLock(&avc->lock);
	}		/* if not disconnected */
    }			/* if (avc->execsOrWriters > 0) */

#if defined(AFS_SGI_ENV)
    AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
    if (code == VNOVNODE) {
	/* syncing an unlinked file! - non-informative to pass an errno
	 * 102 (== VNOVNODE) to user
	 */
	code = ENOENT;
    }
#endif
    AFS_DISCON_UNLOCK();
    code = afs_CheckCode(code, treq, 33);
    afs_DestroyReq(treq);
    ReleaseSharedLock(&avc->lock);
    return code;
}