Import Upstream version 1.8.5
[hcoop/debian/openafs.git] / src / afs / VNOPS / afs_vnop_read.c
1 /*
2 * Copyright 2000, International Business Machines Corporation and others.
3 * All Rights Reserved.
4 *
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
8 */
9
10 /*
11 * Implements:
12 * afs_MemRead
13 * afs_PrefetchChunk
14 * afs_UFSRead
15 *
16 */
17
18 #include <afsconfig.h>
19 #include "afs/param.h"
20
21
22 #include "afs/sysincludes.h" /* Standard vendor system headers */
23 #include "afsincludes.h" /* Afs-based standard headers */
24 #include "afs/afs_stats.h" /* statistics */
25 #include "afs/afs_cbqueue.h"
26 #include "afs/nfsclient.h"
27 #include "afs/afs_osidnlc.h"
28 #include "afs/afs_osi.h"
29
30
31 extern char afs_zeros[AFS_ZEROS];
32
33 /* Imported variables */
34 extern afs_rwlock_t afs_xdcache;
35 extern unsigned char *afs_indexFlags;
36 extern afs_hyper_t *afs_indexTimes; /* Dcache entry Access times */
37 extern afs_hyper_t afs_indexCounter; /* Fake time for marking index */
38
39
40 /* Forward declarations */
41 void afs_PrefetchChunk(struct vcache *avc, struct dcache *adc,
42 afs_ucred_t *acred, struct vrequest *areq);
43
/*!
 * afs_read -- generic read path for the cache manager.
 *
 * Copies up to AFS_UIO_RESID(auio) bytes, starting at AFS_UIO_OFFSET(auio),
 * from the cached copy of the file described by 'avc' into the caller's
 * uio structure, one cache chunk at a time.  Chunks not yet cached are
 * fetched from the fileserver, preferably by handing the fetch to a
 * background daemon (afs_BQueue) and sleeping until data arrives, or
 * synchronously (afs_GetDCache with flag 1) when the daemons are busy.
 *
 * \param avc    vcache entry of the file being read.
 * \param auio   caller's uio; its resid/offset select the byte range and it
 *               is advanced (afsio_skip) as bytes are transferred.
 * \param acred  credentials, used for the NFS-translator access check and
 *               passed to background fetch requests.
 * \param noLock non-zero means the caller already holds the needed vcache
 *               state: we skip afs_VerifyVCache, do not take avc->lock,
 *               and serve only data already cached (afs_FindDCache) rather
 *               than triggering fetches.
 *
 * \return 0 on success, otherwise an error code filtered through
 *         afs_CheckCode() (e.g. EIO when no dcache could be obtained,
 *         ENETDOWN when afs_GetDCache type-2 fails).
 *
 * Locking: takes avc->lock(R) unless noLock; takes and drops tdc->lock(R)
 * and tdc->mflock around waits.  All three are dropped before every
 * afs_osi_SleepSig and reacquired afterwards, in avc -> tdc -> mflock order.
 */
int
afs_read(struct vcache *avc, struct uio *auio, afs_ucred_t *acred,
	 int noLock)
{
    afs_size_t totalLength;	/* bytes still wanted by the caller */
    afs_size_t transferLength;	/* bytes moved so far (statistics only) */
    afs_size_t filePos;		/* current file offset of the transfer */
    afs_size_t offset, tlen;	/* offset within current chunk; scratch */
    afs_size_t len = 0;		/* usable bytes in the current chunk */
    afs_int32 trimlen;
    struct dcache *tdc = 0;	/* current chunk's dcache entry, if any */
    afs_int32 error, trybusy = 1;
    struct uio *tuiop = NULL;	/* per-segment copy of auio */
    afs_int32 code;
    struct vrequest *treq = NULL;

    AFS_STATCNT(afs_read);

    if (avc->vc_error)
	return EIO;

    AFS_DISCON_LOCK();

    /* check that we have the latest status info in the vnode cache */
    if ((code = afs_CreateReq(&treq, acred)))
	goto out;

    if (!noLock) {
	if (!avc)
	    osi_Panic("null avc in afs_GenericRead");

	code = afs_VerifyVCache(avc, treq);
	if (code) {
	    code = afs_CheckCode(code, treq, 8);	/* failed to get it */
	    goto out;
	}
    }
#ifndef AFS_VM_RDWR_ENV
    /* Requests arriving via the NFS translator carry foreign credentials;
     * enforce AFS read permission explicitly here. */
    if (AFS_NFSXLATORREQ(acred)) {
	if (!afs_AccessOK
	    (avc, PRSFS_READ, treq,
	     CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ)) {
	    code = afs_CheckCode(EACCES, treq, 9);
	    goto out;
	}
    }
#endif

    totalLength = AFS_UIO_RESID(auio);
    filePos = AFS_UIO_OFFSET(auio);
    afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_INT32,
	       totalLength, ICL_TYPE_OFFSET,
	       ICL_HANDLE_OFFSET(avc->f.m.Length));
    error = 0;
    transferLength = 0;
    if (!noLock)
	ObtainReadLock(&avc->lock);
#if defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
    /* First read since flushDV was reset: remember the data version we
     * are serving text pages from. */
    if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
	hset(avc->flushDV, avc->f.m.DataVersion);
    }
#endif

    /*
     * Locks held:
     * avc->lock(R)
     */

    /* This bit is bogus. We're checking to see if the read goes past the
     * end of the file. If so, we should be zeroing out all of the buffers
     * that the client has passed into us (there is a danger that we may leak
     * kernel memory if we do not). However, this behaviour is disabled by
     * not setting len before this segment runs, and by setting len to 0
     * immediately we enter it. In addition, we also need to check for a read
     * which partially goes off the end of the file in the while loop below.
     */

    if (filePos >= avc->f.m.Length) {
	/* NOTE: with len still 0 here, both branches below leave len == 0,
	 * so the UIOMOVE moves nothing -- intentionally disabled, per the
	 * comment above. */
	if (len > AFS_ZEROS)
	    len = sizeof(afs_zeros);	/* and in 0 buffer */
	len = 0;
	trimlen = len;
	tuiop = afsio_partialcopy(auio, trimlen);
	AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, tuiop, code);
    }

    /* Main transfer loop: each iteration locates (or fetches) the chunk
     * covering filePos and moves up to one chunk's worth of data. */
    while (avc->f.m.Length > 0 && totalLength > 0) {
	/* read all of the cached info */
	if (filePos >= avc->f.m.Length)
	    break;		/* all done */
	if (noLock) {
	    /* Locked caller: serve only what is already in the cache. */
	    if (tdc) {
		ReleaseReadLock(&tdc->lock);
		afs_PutDCache(tdc);
	    }
	    tdc = afs_FindDCache(avc, filePos);
	    if (tdc) {
		ObtainReadLock(&tdc->lock);
		offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
		len = tdc->validPos - filePos;
	    }
	} else {
	    /* a tricky question: does the presence of the DFFetching flag
	     * mean that we're fetching the latest version of the file? No.
	     * The server could update the file as soon as the fetch responsible
	     * for the setting of the DFFetching flag completes.
	     *
	     * However, the presence of the DFFetching flag (visible under
	     * a dcache read lock since it is set and cleared only under a
	     * dcache write lock) means that we're fetching as good a version
	     * as was known to this client at the time of the last call to
	     * afs_VerifyVCache, since the latter updates the stat cache's
	     * m.DataVersion field under a vcache write lock, and from the
	     * time that the DFFetching flag goes on in afs_GetDCache (before
	     * the fetch starts), to the time it goes off (after the fetch
	     * completes), afs_GetDCache keeps at least a read lock on the
	     * vcache entry.
	     *
	     * This means that if the DFFetching flag is set, we can use that
	     * data for any reads that must come from the current version of
	     * the file (current == m.DataVersion).
	     *
	     * Another way of looking at this same point is this: if we're
	     * fetching some data and then try do an afs_VerifyVCache, the
	     * VerifyVCache operation will not complete until after the
	     * DFFetching flag is turned off and the dcache entry's f.versionNo
	     * field is updated.
	     *
	     * Note, by the way, that if DFFetching is set,
	     * m.DataVersion > f.versionNo (the latter is not updated until
	     * after the fetch completes).
	     */
	    if (tdc) {
		ReleaseReadLock(&tdc->lock);
		afs_PutDCache(tdc);	/* before reusing tdc */
	    }
	    tdc = afs_GetDCache(avc, filePos, treq, &offset, &len, 2);
	    if (!tdc) {
		error = ENETDOWN;
		break;
	    }

	    ObtainReadLock(&tdc->lock);
	    /* now, first try to start transfer, if we'll need the data. If
	     * data already coming, we don't need to do this, obviously. Type
	     * 2 requests never return a null dcache entry, btw.
	     */
	    if (!(tdc->dflags & DFFetching)
		&& !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
		/* have cache entry, it is not coming in now,
		 * and we'll need new data */
	      tagain:
		if (trybusy && !afs_BBusy()) {
		    struct brequest *bp;
		    /* daemon is not busy */
		    ObtainSharedLock(&tdc->mflock, 665);
		    if (!(tdc->mflags & DFFetchReq)) {
			/* start the daemon (may already be running, however) */
			UpgradeSToWLock(&tdc->mflock, 666);
			tdc->mflags |= DFFetchReq;
			bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
					(afs_size_t) filePos, (afs_size_t) 0,
					tdc, NULL, NULL);
			if (!bp) {
			    /* Bkg table full; retry deadlocks */
			    tdc->mflags &= ~DFFetchReq;
			    trybusy = 0;	/* Avoid bkg daemon since they're too busy */
			    ReleaseWriteLock(&tdc->mflock);
			    goto tagain;
			}
			ConvertWToSLock(&tdc->mflock);
			/* don't use bp pointer! */
		    }
		    code = 0;
		    ConvertSToRLock(&tdc->mflock);
		    /* Wait for our queued fetch request to be picked up;
		     * the daemon clears DFFetchReq and wakes &tdc->validPos. */
		    while (!code && tdc->mflags & DFFetchReq) {
			afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
				   ICL_TYPE_STRING, __FILE__, ICL_TYPE_INT32,
				   __LINE__, ICL_TYPE_POINTER, tdc,
				   ICL_TYPE_INT32, tdc->dflags);
			/* don't need waiting flag on this one */
			ReleaseReadLock(&tdc->mflock);
			ReleaseReadLock(&tdc->lock);
			ReleaseReadLock(&avc->lock);
			code = afs_osi_SleepSig(&tdc->validPos);
			ObtainReadLock(&avc->lock);
			ObtainReadLock(&tdc->lock);
			ObtainReadLock(&tdc->mflock);
		    }
		    ReleaseReadLock(&tdc->mflock);
		    if (code) {
			/* interrupted by a signal */
			error = code;
			break;
		    }
		}
	    }
	    /* now data may have started flowing in (if DFFetching is on). If
	     * data is now streaming in, then wait for some interesting stuff.
	     */
	    code = 0;
	    while (!code && (tdc->dflags & DFFetching)
		   && tdc->validPos <= filePos) {
		/* too early: wait for DFFetching flag to vanish,
		 * or data to appear */
		afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT, ICL_TYPE_STRING,
			   __FILE__, ICL_TYPE_INT32, __LINE__,
			   ICL_TYPE_POINTER, tdc, ICL_TYPE_INT32,
			   tdc->dflags);
		ReleaseReadLock(&tdc->lock);
		ReleaseReadLock(&avc->lock);
		code = afs_osi_SleepSig(&tdc->validPos);
		ObtainReadLock(&avc->lock);
		ObtainReadLock(&tdc->lock);
	    }
	    if (code) {
		/* interrupted by a signal */
		error = code;
		break;
	    }
	    /* fetching flag gone, data is here, or we never tried
	     * (BBusy for instance) */
	    if (tdc->dflags & DFFetching) {
		/* still fetching, some new data is here:
		 * compute length and offset */
		offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
		len = tdc->validPos - filePos;
	    } else {
		/* no longer fetching, verify data version
		 * (avoid new GetDCache call) */
		if (hsame(avc->f.m.DataVersion, tdc->f.versionNo)
		    && ((len = tdc->validPos - filePos) > 0)) {
		    offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
		} else {
		    /* don't have current data, so get it below */
		    afs_Trace3(afs_iclSetp, CM_TRACE_VERSIONNO,
			       ICL_TYPE_INT64, ICL_HANDLE_OFFSET(filePos),
			       ICL_TYPE_HYPER, &avc->f.m.DataVersion,
			       ICL_TYPE_HYPER, &tdc->f.versionNo);
		    ReleaseReadLock(&tdc->lock);
		    afs_PutDCache(tdc);
		    tdc = NULL;
		}
	    }

	    if (!tdc) {
		/* If we get, it was not possible to start the
		 * background daemon. With flag == 1 afs_GetDCache
		 * does the FetchData rpc synchronously.
		 */
		ReleaseReadLock(&avc->lock);
		tdc = afs_GetDCache(avc, filePos, treq, &offset, &len, 1);
		ObtainReadLock(&avc->lock);
		if (tdc)
		    ObtainReadLock(&tdc->lock);
	    }
	}

	afs_Trace3(afs_iclSetp, CM_TRACE_VNODEREAD, ICL_TYPE_POINTER, tdc,
		   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(offset),
		   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(len));
	if (!tdc) {
	    error = EIO;
	    break;
	}

	/*
	 * Locks held:
	 * avc->lock(R)
	 * tdc->lock(R)
	 */

	if (len > totalLength)
	    len = totalLength;	/* will read len bytes */
	if (len <= 0) {		/* shouldn't get here if DFFetching is on */
	    /* read past the end of a chunk, may not be at next chunk yet, and yet
	     * also not at eof, so may have to supply fake zeros */
	    len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset;	/* bytes left in chunk addr space */
	    if (len > totalLength)
		len = totalLength;	/* and still within xfr request */
	    tlen = avc->f.m.Length - offset;	/* and still within file */
	    if (len > tlen)
		len = tlen;
	    if (len > AFS_ZEROS)
		len = sizeof(afs_zeros);	/* and in 0 buffer */
	    trimlen = len;
	    tuiop = afsio_partialcopy(auio, trimlen);
	    AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, tuiop, code);
	    if (code) {
		error = code;
		break;
	    }
	} else {
	    /* get the data from the cache */

	    /* mung uio structure to be right for this transfer */
	    trimlen = len;
	    tuiop = afsio_partialcopy(auio, trimlen);
	    AFS_UIO_SETOFFSET(tuiop, offset);

	    /* cache-type-specific read (UFS or memcache backend) */
	    code = (*(afs_cacheType->vreadUIO))(&tdc->f.inode, tuiop);

	    if (code) {
		error = code;
		break;
	    }
	}
	/* otherwise we've read some, fixup length, etc and continue with next seg */
	len = len - AFS_UIO_RESID(tuiop);	/* compute amount really transferred */
	trimlen = len;
	afsio_skip(auio, trimlen);	/* update input uio structure */
	totalLength -= len;
	transferLength += len;
	filePos += len;

	if (len <= 0)
	    break;		/* surprise eof */
	if (tuiop) {
	    afsio_free(tuiop);
	    tuiop = NULL;
	}
    }				/* the whole while loop */

    /*
     * Locks held:
     * avc->lock(R)
     * tdc->lock(R) if tdc
     */

    /* if we make it here with tdc non-zero, then it is the last chunk we
     * dealt with, and we have to release it when we're done. We hold on
     * to it in case we need to do a prefetch.
     */
    if (tdc) {
	ReleaseReadLock(&tdc->lock);
#if !defined(AFS_VM_RDWR_ENV)
	/* try to queue prefetch, if needed */
	if (!noLock) {
	    if (!(tdc->mflags &DFNextStarted))
		afs_PrefetchChunk(avc, tdc, acred, treq);
	}
#endif
	afs_PutDCache(tdc);
    }
    if (!noLock)
	ReleaseReadLock(&avc->lock);

    code = afs_CheckCode(error, treq, 10);

    if (tuiop)
	afsio_free(tuiop);

  out:
    AFS_DISCON_UNLOCK();
    afs_DestroyReq(treq);
    return code;
}
400
/* called with the dcache entry triggering the fetch, the vcache entry involved,
 * and a vrequest for the read call. Marks the dcache entry as having already
 * triggered a prefetch, starts the prefetch going and sets the DFFetchReq
 * flag in the prefetched block, so that the next call to read knows to wait
 * for the daemon to start doing things.
 *
 * This function must be called with the vnode at least read-locked, and
 * no locks on the dcache, because it plays around with dcache entries.
 */
/*!
 * Queue a background prefetch of the chunk following 'adc', if it lies
 * within the file and the background daemons are not busy.
 *
 * \param avc   vcache of the file being read (at least read-locked by caller).
 * \param adc   dcache entry whose successor chunk should be prefetched;
 *              its DFNextStarted mflag records that a prefetch was tried.
 * \param acred credentials passed to the queued BOP_FETCH request.
 * \param areq  vrequest from the originating read call.
 *
 * Best-effort: all failure paths (daemons busy, no dcache, BKG table full)
 * return silently, rolling back DFNextStarted where appropriate.
 */
void
afs_PrefetchChunk(struct vcache *avc, struct dcache *adc,
		  afs_ucred_t *acred, struct vrequest *areq)
{
    struct dcache *tdc;		/* dcache entry for the next chunk */
    afs_size_t offset;
    afs_size_t j1, j2;		/* junk vbls for GetDCache to trash */

    offset = adc->f.chunk + 1;	/* next chunk we'll need */
    offset = AFS_CHUNKTOBASE(offset);	/* base of next chunk */
    ObtainReadLock(&adc->lock);
    ObtainSharedLock(&adc->mflock, 662);
    /* Only prefetch when the next chunk exists, we have not already tried
     * for this entry, and a background daemon is available. */
    if (offset < avc->f.m.Length && !(adc->mflags & DFNextStarted)
	&& !afs_BBusy()) {
	struct brequest *bp;

	UpgradeSToWLock(&adc->mflock, 663);
	adc->mflags |= DFNextStarted;	/* we've tried to prefetch for this guy */
	ReleaseWriteLock(&adc->mflock);
	ReleaseReadLock(&adc->lock);

	tdc = afs_GetDCache(avc, offset, areq, &j1, &j2, 2);	/* type 2 never returns 0 */
	/*
	 * In disconnected mode, type 2 can return 0 because it doesn't
	 * make any sense to allocate a dcache we can never fill
	 */
	if (tdc == NULL)
	    return;

	ObtainSharedLock(&tdc->mflock, 651);
	if (!(tdc->mflags & DFFetchReq)) {
	    /* ask the daemon to do the work */
	    UpgradeSToWLock(&tdc->mflock, 652);
	    tdc->mflags |= DFFetchReq;	/* guaranteed to be cleared by BKG or GetDCache */
	    /* last parm (1) tells bkg daemon to do an afs_PutDCache when it is done,
	     * since we don't want to wait for it to finish before doing so ourselves.
	     */
	    bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
			    (afs_size_t) offset, (afs_size_t) 1, tdc,
			    (void *)0, (void *)0);
	    if (!bp) {
		/* Bkg table full; just abort non-important prefetching to avoid deadlocks */
		tdc->mflags &= ~DFFetchReq;
		ReleaseWriteLock(&tdc->mflock);
		afs_PutDCache(tdc);

		/*
		 * DCLOCKXXX: This is a little sketchy, since someone else
		 * could have already started a prefetch.. In practice,
		 * this probably doesn't matter; at most it would cause an
		 * extra slot in the BKG table to be used up when someone
		 * prefetches this for the second time.
		 */
		/* Roll back the DFNextStarted mark so a later read retries. */
		ObtainReadLock(&adc->lock);
		ObtainWriteLock(&adc->mflock, 664);
		adc->mflags &= ~DFNextStarted;
		ReleaseWriteLock(&adc->mflock);
		ReleaseReadLock(&adc->lock);
	    } else {
		ReleaseWriteLock(&tdc->mflock);
	    }
	} else {
	    /* a fetch is already requested for that chunk; nothing to do */
	    ReleaseSharedLock(&tdc->mflock);
	    afs_PutDCache(tdc);
	}
    } else {
	ReleaseSharedLock(&adc->mflock);
	ReleaseReadLock(&adc->lock);
    }
}
480
/*!
 * Read from a UFS (disk) cache file into a uio, using whatever vnode
 * read interface the host operating system provides.
 *
 * \param cacheId identifies the cache file; passed to osi_UFSOpen().
 * \param tuiop   uio describing the destination buffers, offset and length.
 *
 * \return -1 if the cache file cannot be opened; otherwise the return
 *         value of the platform's vnode read operation (0 on success).
 *
 * Each platform branch drops the AFS global lock around the vendor VOP
 * call (where the branch calls AFS_GUNLOCK/AFS_GLOCK) and takes whatever
 * vnode lock that platform's VOP_READ requires.
 */
int
afs_UFSReadUIO(afs_dcache_id_t *cacheId, struct uio *tuiop)
{
    int code;
    struct osi_file *tfile;

    tfile = (struct osi_file *) osi_UFSOpen(cacheId);
    if (!tfile)
	return -1;

#if defined(AFS_AIX41_ENV)
    AFS_GUNLOCK();
    code =
	VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, tuiop, NULL, NULL,
		  NULL, afs_osi_credp);
    AFS_GLOCK();
#elif defined(AFS_AIX32_ENV)
    code =
	VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, tuiop, NULL, NULL);
    /* Flush all JFS pages now for big performance gain in big file cases
     * If we do something like this, must check to be sure that AFS file
     * isn't mmapped... see afs_gn_map() for why.
     */
    /*
       if (tfile->vnode->v_gnode && tfile->vnode->v_gnode->gn_seg) {
       any different ways to do similar things:
       so far, the best performing one is #2, but #1 might match it if we
       straighten out the confusion regarding which pages to flush. It
       really does matter.
       1.       vm_flushp(tfile->vnode->v_gnode->gn_seg, 0, len/PAGESIZE - 1);
       2.       vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
       (len + PAGESIZE-1)/PAGESIZE);
       3.       vms_inactive(tfile->vnode->v_gnode->gn_seg) Doesn't work correctly
       4.       vms_delete(tfile->vnode->v_gnode->gn_seg) probably also fails
       tfile->vnode->v_gnode->gn_seg = NULL;
       5.       deletep
       6.       ipgrlse
       7.       ifreeseg
       Unfortunately, this seems to cause frequent "cache corruption" episodes.
       vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
       (len + PAGESIZE-1)/PAGESIZE);
       }
     */
#elif defined(AFS_AIX_ENV)
    /* NOTE(review): 'offset' is not declared in this function; this branch
     * looks like it would not compile on old AIX -- confirm against an AIX
     * build before touching.  Dead code on all other platforms. */
    code =
	VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, (off_t) & offset,
		  tuiop, NULL, NULL, -1);
#elif defined(AFS_SUN5_ENV)
    AFS_GUNLOCK();
#ifdef AFS_SUN510_ENV
    VOP_RWLOCK(tfile->vnode, 0, NULL);
    code = VOP_READ(tfile->vnode, tuiop, 0, afs_osi_credp, NULL);
    VOP_RWUNLOCK(tfile->vnode, 0, NULL);
#else
    VOP_RWLOCK(tfile->vnode, 0);
    code = VOP_READ(tfile->vnode, tuiop, 0, afs_osi_credp);
    VOP_RWUNLOCK(tfile->vnode, 0);
#endif
    AFS_GLOCK();
#elif defined(AFS_SGI_ENV)
    AFS_GUNLOCK();
    AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_READ);
    AFS_VOP_READ(tfile->vnode, tuiop, IO_ISLOCKED, afs_osi_credp,
		 code);
    AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_READ);
    AFS_GLOCK();
#elif defined(AFS_HPUX100_ENV)
    AFS_GUNLOCK();
    code = VOP_RDWR(tfile->vnode, tuiop, UIO_READ, 0, afs_osi_credp);
    AFS_GLOCK();
#elif defined(AFS_LINUX20_ENV)
    AFS_GUNLOCK();
    code = osi_rdwr(tfile, tuiop, UIO_READ);
    AFS_GLOCK();
#elif defined(AFS_DARWIN80_ENV)
    AFS_GUNLOCK();
    code = VNOP_READ(tfile->vnode, tuiop, 0, afs_osi_ctxtp);
    AFS_GLOCK();
#elif defined(AFS_DARWIN_ENV)
    AFS_GUNLOCK();
    VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
    code = VOP_READ(tfile->vnode, tuiop, 0, afs_osi_credp);
    VOP_UNLOCK(tfile->vnode, 0, current_proc());
    AFS_GLOCK();
#elif defined(AFS_FBSD80_ENV)
    AFS_GUNLOCK();
    VOP_LOCK(tfile->vnode, LK_EXCLUSIVE);
    code = VOP_READ(tfile->vnode, tuiop, 0, afs_osi_credp);
    VOP_UNLOCK(tfile->vnode, 0);
    AFS_GLOCK();
#elif defined(AFS_FBSD_ENV)
    AFS_GUNLOCK();
    VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curthread);
    code = VOP_READ(tfile->vnode, tuiop, 0, afs_osi_credp);
    VOP_UNLOCK(tfile->vnode, 0, curthread);
    AFS_GLOCK();
#elif defined(AFS_NBSD_ENV)
    tuiop->uio_rw = UIO_READ;
    AFS_GUNLOCK();
    VOP_LOCK(tfile->vnode, LK_EXCLUSIVE);
    code = VOP_READ(tfile->vnode, tuiop, 0, afs_osi_credp);
# if defined(AFS_NBSD60_ENV)
    VOP_UNLOCK(tfile->vnode);
# else
    VOP_UNLOCK(tfile->vnode, 0);
# endif
    AFS_GLOCK();
#elif defined(AFS_XBSD_ENV)
    AFS_GUNLOCK();
    VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
    code = VOP_READ(tfile->vnode, tuiop, 0, afs_osi_credp);
    VOP_UNLOCK(tfile->vnode, 0, curproc);
    AFS_GLOCK();
#else
    /* default: generic VOP_RDWR interface */
    code = VOP_RDWR(tfile->vnode, tuiop, UIO_READ, 0, afs_osi_credp);
#endif
    osi_UFSClose(tfile);

    return code;
}