1 /*
2 * Copyright 2000, International Business Machines Corporation and others.
3 * All Rights Reserved.
4 *
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
8 */
9
10 /*
11 * Linux specific vnodeops. Also includes the glue routines required to call
12 * AFS vnodeops.
13 *
14 * So far the only truly scary part is that Linux relies on the inode cache
15 * to be up to date. Don't you dare break a callback and expect an fstat
16 * to give you meaningful information. This appears to be fixed in the 2.1
17 * development kernels. As it is we can fix this now by intercepting the
18 * stat calls.
19 */
20
21 #include <afsconfig.h>
22 #include "afs/param.h"
23
24
25 #include "afs/sysincludes.h"
26 #include "afsincludes.h"
27 #include "afs/afs_stats.h"
28 #include <linux/mm.h>
29 #ifdef HAVE_MM_INLINE_H
30 #include <linux/mm_inline.h>
31 #endif
32 #include <linux/pagemap.h>
33 #include <linux/writeback.h>
34 #include <linux/pagevec.h>
35 #include <linux/aio.h>
36 #include "afs/lock.h"
37 #include "afs/afs_bypasscache.h"
38
39 #include "osi_compat.h"
40 #include "osi_pagecopy.h"
41
42 #ifndef HAVE_LINUX_PAGEVEC_LRU_ADD_FILE
43 #define __pagevec_lru_add_file __pagevec_lru_add
44 #endif
45
46 #ifndef MAX_ERRNO
47 #define MAX_ERRNO 1000L
48 #endif
49
50 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)
51 /* Enable our workaround for a race with d_splice_alias. The race was fixed in
52 * 2.6.34, so don't do it after that point. */
53 # define D_SPLICE_ALIAS_RACE
54 #endif
55
56 /* Workaround for RH 7.5 which introduced file operation iterate() but requires
57 * each file->f_mode to be marked with FMODE_KABI_ITERATE. Instead OpenAFS will
58 * continue to use file operation readdir() in this case.
59 */
60 #if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE) && !defined(FMODE_KABI_ITERATE)
61 #define USE_FOP_ITERATE 1
62 #else
63 #undef USE_FOP_ITERATE
64 #endif
65
66 int cachefs_noreadpage = 0;
67
68 extern struct backing_dev_info *afs_backing_dev_info;
69
70 extern struct vcache *afs_globalVp;
71
72 /* This function converts a positive error code from AFS into a negative
73 * code suitable for passing into the Linux VFS layer. It checks that the
74 * error code is within the permissible bounds for the ERR_PTR mechanism.
75 *
76 * _All_ error codes which come from the AFS layer should be passed through
77 * this function before being returned to the kernel.
78 */
79
80 static inline int
81 afs_convert_code(int code) {
82 if ((code >= 0) && (code <= MAX_ERRNO))
83 return -code;
84 else
85 return -EIO;
86 }
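/* Illustrative sketch (not compiled): the mapping above turns a positive AFS
 * error such as ENOENT into -ENOENT, while anything outside [0, MAX_ERRNO]
 * collapses to -EIO, keeping the result safe for ERR_PTR()/PTR_ERR():
 *
 *     afs_convert_code(ENOENT);          returns -ENOENT
 *     afs_convert_code(MAX_ERRNO + 1);   returns -EIO
 */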
87
88 /* Linux doesn't require a credp for many functions, and crref is an expensive
89 * operation. This helper function avoids obtaining it for VerifyVCache calls
90 */
91
92 static inline int
93 afs_linux_VerifyVCache(struct vcache *avc, cred_t **retcred) {
94 cred_t *credp = NULL;
95 struct vrequest *treq = NULL;
96 int code;
97
98 if (avc->f.states & CStatd) {
99 if (retcred)
100 *retcred = NULL;
101 return 0;
102 }
103
104 credp = crref();
105
106 code = afs_CreateReq(&treq, credp);
107 if (code == 0) {
108 code = afs_VerifyVCache2(avc, treq);
109 afs_DestroyReq(treq);
110 }
111
112 if (retcred != NULL)
113 *retcred = credp;
114 else
115 crfree(credp);
116
117 return afs_convert_code(code);
118 }
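/* Typical caller pattern (sketch): pass a cred_t ** only when the credential
 * is needed afterwards; the caller then owns whatever reference comes back
 * (possibly NULL if the vcache was already valid) and must crfree() it, as
 * the write paths below do:
 *
 *     cred_t *credp;
 *     code = afs_linux_VerifyVCache(vcp, &credp);
 *     ...
 *     if (credp)
 *         crfree(credp);
 */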
119
120 #if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER) || defined(HAVE_LINUX_GENERIC_FILE_AIO_READ)
121 # if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER)
122 static ssize_t
123 afs_linux_read_iter(struct kiocb *iocb, struct iov_iter *iter)
124 # elif defined(LINUX_HAS_NONVECTOR_AIO)
125 static ssize_t
126 afs_linux_aio_read(struct kiocb *iocb, char __user *buf, size_t bufsize,
127 loff_t pos)
128 # else
129 static ssize_t
130 afs_linux_aio_read(struct kiocb *iocb, const struct iovec *buf,
131 unsigned long bufsize, loff_t pos)
132 # endif
133 {
134 struct file *fp = iocb->ki_filp;
135 ssize_t code = 0;
136 struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
137 # if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER)
138 loff_t pos = iocb->ki_pos;
139 unsigned long bufsize = iter->nr_segs;
140 # endif
141
142
143 AFS_GLOCK();
144 afs_Trace4(afs_iclSetp, CM_TRACE_AIOREADOP, ICL_TYPE_POINTER, vcp,
145 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(pos), ICL_TYPE_INT32,
146 (afs_int32)bufsize, ICL_TYPE_INT32, 99999);
147 code = afs_linux_VerifyVCache(vcp, NULL);
148
149 if (code == 0) {
150 /* Linux's FlushPages implementation doesn't ever use credp,
151 * so we optimise by not using it */
152 osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
153 AFS_GUNLOCK();
154 # if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER)
155 code = generic_file_read_iter(iocb, iter);
156 # else
157 code = generic_file_aio_read(iocb, buf, bufsize, pos);
158 # endif
159 AFS_GLOCK();
160 }
161
162 afs_Trace4(afs_iclSetp, CM_TRACE_AIOREADOP, ICL_TYPE_POINTER, vcp,
163 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(pos), ICL_TYPE_INT32,
164 (afs_int32)bufsize, ICL_TYPE_INT32, code);
165 AFS_GUNLOCK();
166 return code;
167 }
168 #else
169 static ssize_t
170 afs_linux_read(struct file *fp, char *buf, size_t count, loff_t * offp)
171 {
172 ssize_t code = 0;
173 struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
174
175 AFS_GLOCK();
176 afs_Trace4(afs_iclSetp, CM_TRACE_READOP, ICL_TYPE_POINTER, vcp,
177 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
178 99999);
179 code = afs_linux_VerifyVCache(vcp, NULL);
180
181 if (code == 0) {
182 /* Linux's FlushPages implementation doesn't ever use credp,
183 * so we optimise by not using it */
184 osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
185 AFS_GUNLOCK();
186 code = do_sync_read(fp, buf, count, offp);
187 AFS_GLOCK();
188 }
189
190 afs_Trace4(afs_iclSetp, CM_TRACE_READOP, ICL_TYPE_POINTER, vcp,
191 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
192 code);
193 AFS_GUNLOCK();
194 return code;
195 }
196 #endif
197
198
199 /* Now we have integrated VM for writes as well as reads. The generic write operations
200 * also take care of re-positioning the pointer if the file is open in append
201 * mode. Call fake open/close to ensure we do writes of core dumps.
202 */
203 #if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER) || defined(HAVE_LINUX_GENERIC_FILE_AIO_READ)
204 # if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER)
205 static ssize_t
206 afs_linux_write_iter(struct kiocb *iocb, struct iov_iter *iter)
207 # elif defined(LINUX_HAS_NONVECTOR_AIO)
208 static ssize_t
209 afs_linux_aio_write(struct kiocb *iocb, const char __user *buf, size_t bufsize,
210 loff_t pos)
211 # else
212 static ssize_t
213 afs_linux_aio_write(struct kiocb *iocb, const struct iovec *buf,
214 unsigned long bufsize, loff_t pos)
215 # endif
216 {
217 ssize_t code = 0;
218 struct vcache *vcp = VTOAFS(iocb->ki_filp->f_dentry->d_inode);
219 cred_t *credp;
220 # if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER)
221 loff_t pos = iocb->ki_pos;
222 unsigned long bufsize = iter->nr_segs;
223 # endif
224
225 AFS_GLOCK();
226
227 afs_Trace4(afs_iclSetp, CM_TRACE_AIOWRITEOP, ICL_TYPE_POINTER, vcp,
228 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(pos), ICL_TYPE_INT32,
229 (afs_int32)bufsize, ICL_TYPE_INT32,
230 (iocb->ki_filp->f_flags & O_APPEND) ? 99998 : 99999);
231
232 code = afs_linux_VerifyVCache(vcp, &credp);
233
234 ObtainWriteLock(&vcp->lock, 529);
235 afs_FakeOpen(vcp);
236 ReleaseWriteLock(&vcp->lock);
237 if (code == 0) {
238 AFS_GUNLOCK();
239 # if defined(STRUCT_FILE_OPERATIONS_HAS_READ_ITER)
240 code = generic_file_write_iter(iocb, iter);
241 # else
242 code = generic_file_aio_write(iocb, buf, bufsize, pos);
243 # endif
244 AFS_GLOCK();
245 }
246
247 ObtainWriteLock(&vcp->lock, 530);
248
249 if (vcp->execsOrWriters == 1 && !credp)
250 credp = crref();
251
252 afs_FakeClose(vcp, credp);
253 ReleaseWriteLock(&vcp->lock);
254
255 afs_Trace4(afs_iclSetp, CM_TRACE_AIOWRITEOP, ICL_TYPE_POINTER, vcp,
256 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(pos), ICL_TYPE_INT32,
257 (afs_int32)bufsize, ICL_TYPE_INT32, code);
258
259 if (credp)
260 crfree(credp);
261 AFS_GUNLOCK();
262 return code;
263 }
264 #else
265 static ssize_t
266 afs_linux_write(struct file *fp, const char *buf, size_t count, loff_t * offp)
267 {
268 ssize_t code = 0;
269 struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
270 cred_t *credp;
271
272 AFS_GLOCK();
273
274 afs_Trace4(afs_iclSetp, CM_TRACE_WRITEOP, ICL_TYPE_POINTER, vcp,
275 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
276 (fp->f_flags & O_APPEND) ? 99998 : 99999);
277
278 code = afs_linux_VerifyVCache(vcp, &credp);
279
280 ObtainWriteLock(&vcp->lock, 529);
281 afs_FakeOpen(vcp);
282 ReleaseWriteLock(&vcp->lock);
283 if (code == 0) {
284 AFS_GUNLOCK();
285 code = do_sync_write(fp, buf, count, offp);
286 AFS_GLOCK();
287 }
288
289 ObtainWriteLock(&vcp->lock, 530);
290
291 if (vcp->execsOrWriters == 1 && !credp)
292 credp = crref();
293
294 afs_FakeClose(vcp, credp);
295 ReleaseWriteLock(&vcp->lock);
296
297 afs_Trace4(afs_iclSetp, CM_TRACE_WRITEOP, ICL_TYPE_POINTER, vcp,
298 ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
299 code);
300
301 if (credp)
302 crfree(credp);
303 AFS_GUNLOCK();
304 return code;
305 }
306 #endif
307
308 extern int BlobScan(struct dcache * afile, afs_int32 ablob, afs_int32 *ablobOut);
309
310 /* This is a complete rewrite of afs_readdir, since we can make use of
311 * filldir instead of afs_readdir_move. Note that changes to vcache/dcache
312 * handling and use of bulkstats will need to be reflected here as well.
313 */
314 static int
315 #if defined(USE_FOP_ITERATE)
316 afs_linux_readdir(struct file *fp, struct dir_context *ctx)
317 #else
318 afs_linux_readdir(struct file *fp, void *dirbuf, filldir_t filldir)
319 #endif
320 {
321 struct vcache *avc = VTOAFS(FILE_INODE(fp));
322 struct vrequest *treq = NULL;
323 struct dcache *tdc;
324 int code;
325 int offset;
326 afs_int32 dirpos;
327 struct DirEntry *de;
328 struct DirBuffer entry;
329 ino_t ino;
330 int len;
331 afs_size_t origOffset, tlen;
332 cred_t *credp = crref();
333 struct afs_fakestat_state fakestat;
334
335 AFS_GLOCK();
336 AFS_STATCNT(afs_readdir);
337
338 code = afs_convert_code(afs_CreateReq(&treq, credp));
339 crfree(credp);
340 if (code)
341 goto out1;
342
343 afs_InitFakeStat(&fakestat);
344 code = afs_convert_code(afs_EvalFakeStat(&avc, &fakestat, treq));
345 if (code)
346 goto out;
347
348 /* update the cache entry */
349 tagain:
350 code = afs_convert_code(afs_VerifyVCache2(avc, treq));
351 if (code)
352 goto out;
353
354 /* get a reference to the entire directory */
355 tdc = afs_GetDCache(avc, (afs_size_t) 0, treq, &origOffset, &tlen, 1);
356 len = tlen;
357 if (!tdc) {
358 code = -EIO;
359 goto out;
360 }
361 ObtainWriteLock(&avc->lock, 811);
362 ObtainReadLock(&tdc->lock);
363 /*
364 * Make sure that the data in the cache is current. There are two
365 * cases we need to worry about:
366 * 1. The cache data is being fetched by another process.
367 * 2. The cache data is no longer valid
368 */
369 while ((avc->f.states & CStatd)
370 && (tdc->dflags & DFFetching)
371 && hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
372 ReleaseReadLock(&tdc->lock);
373 ReleaseWriteLock(&avc->lock);
374 afs_osi_Sleep(&tdc->validPos);
375 ObtainWriteLock(&avc->lock, 812);
376 ObtainReadLock(&tdc->lock);
377 }
378 if (!(avc->f.states & CStatd)
379 || !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
380 ReleaseReadLock(&tdc->lock);
381 ReleaseWriteLock(&avc->lock);
382 afs_PutDCache(tdc);
383 goto tagain;
384 }
385
386 /* Set the readdir-in-progress flag, and downgrade the lock
387 * to shared so others will be able to acquire a read lock.
388 */
389 avc->f.states |= CReadDir;
390 avc->dcreaddir = tdc;
391 avc->readdir_pid = MyPidxx2Pid(MyPidxx);
392 ConvertWToSLock(&avc->lock);
393
394 /* Fill in until we get an error or we're done. This implementation
395 * takes an offset in units of blobs, rather than bytes.
396 */
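/* Worked example (illustrative): the offset update at the bottom of the loop,
 * offset = dirpos + 1 + ((len + 16) >> 5), advances in 32-byte blobs. A
 * 12-character name gives (12 + 16) >> 5 == 0 extra blobs, so the next
 * offset is dirpos + 1; a 40-character name gives (40 + 16) >> 5 == 1,
 * i.e. that entry spans two blobs. */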
397 code = 0;
398 #if defined(USE_FOP_ITERATE)
399 offset = ctx->pos;
400 #else
401 offset = (int) fp->f_pos;
402 #endif
403 while (1) {
404 dirpos = 0;
405 code = BlobScan(tdc, offset, &dirpos);
406 if (code == 0 && dirpos == 0) {
407 /* We've reached EOF of the dir blob, so we can stop looking for
408 * entries. */
409 break;
410 }
411
412 if (code == 0) {
413 code = afs_dir_GetVerifiedBlob(tdc, dirpos, &entry);
414 }
415 if (code) {
416 if (!(avc->f.states & CCorrupt)) {
417 struct cell *tc = afs_GetCellStale(avc->f.fid.Cell, READ_LOCK);
418 afs_warn("afs: Corrupt directory (%d.%d.%d.%d [%s] @%lx, pos %d)\n",
419 avc->f.fid.Cell, avc->f.fid.Fid.Volume,
420 avc->f.fid.Fid.Vnode, avc->f.fid.Fid.Unique,
421 tc ? tc->cellName : "",
422 (unsigned long)&tdc->f.inode, dirpos);
423 if (tc)
424 afs_PutCell(tc, READ_LOCK);
425 UpgradeSToWLock(&avc->lock, 814);
426 avc->f.states |= CCorrupt;
427 }
428 code = -EIO;
429 goto unlock_out;
430 }
431
432 de = (struct DirEntry *)entry.data;
433 ino = afs_calc_inum (avc->f.fid.Cell, avc->f.fid.Fid.Volume,
434 ntohl(de->fid.vnode));
435 len = strlen(de->name);
436
437 /* filldir returns -EINVAL when the buffer is full. */
438 {
439 unsigned int type = DT_UNKNOWN;
440 struct VenusFid afid;
441 struct vcache *tvc;
442 int vtype;
443 afid.Cell = avc->f.fid.Cell;
444 afid.Fid.Volume = avc->f.fid.Fid.Volume;
445 afid.Fid.Vnode = ntohl(de->fid.vnode);
446 afid.Fid.Unique = ntohl(de->fid.vunique);
447 if ((avc->f.states & CForeign) == 0 && (ntohl(de->fid.vnode) & 1)) {
448 type = DT_DIR;
449 } else if ((tvc = afs_FindVCache(&afid, 0, 0))) {
450 if (tvc->mvstat != AFS_MVSTAT_FILE) {
451 type = DT_DIR;
452 } else if (((tvc->f.states) & (CStatd | CTruth))) {
453 /* CTruth will be set if the object has
454 *ever* been statd */
455 vtype = vType(tvc);
456 if (vtype == VDIR)
457 type = DT_DIR;
458 else if (vtype == VREG)
459 type = DT_REG;
460 /* Don't do this until we're sure it can't be a mtpt */
461 /* else if (vtype == VLNK)
462 * type=DT_LNK; */
463 /* what other types does AFS support? */
464 }
465 /* clean up from afs_FindVCache */
466 afs_PutVCache(tvc);
467 }
468 /*
469 * If this is NFS readdirplus, then the filler is going to
470 * call getattr on this inode, which will deadlock if we're
471 * holding the GLOCK.
472 */
473 AFS_GUNLOCK();
474 #if defined(USE_FOP_ITERATE)
475 /* dir_emit returns a bool - true when it succeeds.
476 * Invert the result to fit with how we check "code" */
477 code = !dir_emit(ctx, de->name, len, ino, type);
478 #else
479 code = (*filldir) (dirbuf, de->name, len, offset, ino, type);
480 #endif
481 AFS_GLOCK();
482 }
483 DRelease(&entry, 0);
484 if (code)
485 break;
486 offset = dirpos + 1 + ((len + 16) >> 5);
487 }
488 /* If filldir didn't fill in the last one, this is still pointing to that
489 * last attempt.
490 */
491 code = 0;
492
493 unlock_out:
494 #if defined(USE_FOP_ITERATE)
495 ctx->pos = (loff_t) offset;
496 #else
497 fp->f_pos = (loff_t) offset;
498 #endif
499 ReleaseReadLock(&tdc->lock);
500 afs_PutDCache(tdc);
501 UpgradeSToWLock(&avc->lock, 813);
502 avc->f.states &= ~CReadDir;
503 avc->dcreaddir = 0;
504 avc->readdir_pid = 0;
505 ReleaseSharedLock(&avc->lock);
506
507 out:
508 afs_PutFakeStat(&fakestat);
509 afs_DestroyReq(treq);
510 out1:
511 AFS_GUNLOCK();
512 return code;
513 }
514
515
516 /* in afs_pioctl.c */
517 extern int afs_xioctl(struct inode *ip, struct file *fp, unsigned int com,
518 unsigned long arg);
519
520 #if defined(HAVE_UNLOCKED_IOCTL) || defined(HAVE_COMPAT_IOCTL)
521 static long afs_unlocked_xioctl(struct file *fp, unsigned int com,
522 unsigned long arg) {
523 return afs_xioctl(FILE_INODE(fp), fp, com, arg);
524
525 }
526 #endif
527
528
529 static int
530 afs_linux_mmap(struct file *fp, struct vm_area_struct *vmap)
531 {
532 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
533 int code;
534
535 AFS_GLOCK();
536 afs_Trace3(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, vcp,
537 ICL_TYPE_POINTER, vmap->vm_start, ICL_TYPE_INT32,
538 vmap->vm_end - vmap->vm_start);
539
540 /* get a validated vcache entry */
541 code = afs_linux_VerifyVCache(vcp, NULL);
542
543 if (code == 0) {
544 /* Linux's Flushpage implementation doesn't use credp, so optimise
545 * our code to not need to crref() it */
546 osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
547 AFS_GUNLOCK();
548 code = generic_file_mmap(fp, vmap);
549 AFS_GLOCK();
550 if (!code)
551 vcp->f.states |= CMAPPED;
552 }
553 AFS_GUNLOCK();
554
555 return code;
556 }
557
558 static int
559 afs_linux_open(struct inode *ip, struct file *fp)
560 {
561 struct vcache *vcp = VTOAFS(ip);
562 cred_t *credp = crref();
563 int code;
564
565 AFS_GLOCK();
566 code = afs_open(&vcp, fp->f_flags, credp);
567 AFS_GUNLOCK();
568
569 crfree(credp);
570 return afs_convert_code(code);
571 }
572
573 static int
574 afs_linux_release(struct inode *ip, struct file *fp)
575 {
576 struct vcache *vcp = VTOAFS(ip);
577 cred_t *credp = crref();
578 int code = 0;
579
580 AFS_GLOCK();
581 code = afs_close(vcp, fp->f_flags, credp);
582 ObtainWriteLock(&vcp->lock, 807);
583 if (vcp->cred) {
584 crfree(vcp->cred);
585 vcp->cred = NULL;
586 }
587 ReleaseWriteLock(&vcp->lock);
588 AFS_GUNLOCK();
589
590 crfree(credp);
591 return afs_convert_code(code);
592 }
593
594 static int
595 #if defined(FOP_FSYNC_TAKES_DENTRY)
596 afs_linux_fsync(struct file *fp, struct dentry *dp, int datasync)
597 #elif defined(FOP_FSYNC_TAKES_RANGE)
598 afs_linux_fsync(struct file *fp, loff_t start, loff_t end, int datasync)
599 #else
600 afs_linux_fsync(struct file *fp, int datasync)
601 #endif
602 {
603 int code;
604 struct inode *ip = FILE_INODE(fp);
605 cred_t *credp = crref();
606
607 #if defined(FOP_FSYNC_TAKES_RANGE)
608 afs_linux_lock_inode(ip);
609 #endif
610 AFS_GLOCK();
611 code = afs_fsync(VTOAFS(ip), credp);
612 AFS_GUNLOCK();
613 #if defined(FOP_FSYNC_TAKES_RANGE)
614 afs_linux_unlock_inode(ip);
615 #endif
616 crfree(credp);
617 return afs_convert_code(code);
618
619 }
620
621
622 static int
623 afs_linux_lock(struct file *fp, int cmd, struct file_lock *flp)
624 {
625 int code = 0;
626 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
627 cred_t *credp = crref();
628 struct AFS_FLOCK flock;
629
630 /* Convert to a lock format afs_lockctl understands. */
631 memset(&flock, 0, sizeof(flock));
632 flock.l_type = flp->fl_type;
633 flock.l_pid = flp->fl_pid;
634 flock.l_whence = 0;
635 flock.l_start = flp->fl_start;
636 if (flp->fl_end == OFFSET_MAX)
637 flock.l_len = 0; /* Lock to end of file */
638 else
639 flock.l_len = flp->fl_end - flp->fl_start + 1;
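/* Example of the conversion above (illustrative): a byte-range lock with
 * fl_start == 100 and fl_end == 199 becomes l_start == 100, l_len == 100,
 * while fl_end == OFFSET_MAX becomes l_len == 0, the POSIX convention for
 * "lock to end of file". */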
640
641 /* Safe because there are no large files, yet */
642 #if defined(F_GETLK64) && (F_GETLK != F_GETLK64)
643 if (cmd == F_GETLK64)
644 cmd = F_GETLK;
645 else if (cmd == F_SETLK64)
646 cmd = F_SETLK;
647 else if (cmd == F_SETLKW64)
648 cmd = F_SETLKW;
649 #endif /* F_GETLK64 && F_GETLK != F_GETLK64 */
650
651 AFS_GLOCK();
652 code = afs_convert_code(afs_lockctl(vcp, &flock, cmd, credp));
653 AFS_GUNLOCK();
654
655 if ((code == 0 || flp->fl_type == F_UNLCK) &&
656 (cmd == F_SETLK || cmd == F_SETLKW)) {
657 code = afs_posix_lock_file(fp, flp);
658 if (code && flp->fl_type != F_UNLCK) {
659 struct AFS_FLOCK flock2;
660 flock2 = flock;
661 flock2.l_type = F_UNLCK;
662 AFS_GLOCK();
663 afs_lockctl(vcp, &flock2, F_SETLK, credp);
664 AFS_GUNLOCK();
665 }
666 }
667 /* If lockctl says there are no conflicting locks, then also check with the
668 * kernel, as lockctl knows nothing about byte range locks
669 */
670 if (code == 0 && cmd == F_GETLK && flock.l_type == F_UNLCK) {
671 afs_posix_test_lock(fp, flp);
672 /* If we found a lock in the kernel's structure, return it */
673 if (flp->fl_type != F_UNLCK) {
674 crfree(credp);
675 return 0;
676 }
677 }
678
679 /* Convert flock back to Linux's file_lock */
680 flp->fl_type = flock.l_type;
681 flp->fl_pid = flock.l_pid;
682 flp->fl_start = flock.l_start;
683 if (flock.l_len == 0)
684 flp->fl_end = OFFSET_MAX; /* Lock to end of file */
685 else
686 flp->fl_end = flock.l_start + flock.l_len - 1;
687
688 crfree(credp);
689 return code;
690 }
691
692 #ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
693 static int
694 afs_linux_flock(struct file *fp, int cmd, struct file_lock *flp) {
695 int code = 0;
696 struct vcache *vcp = VTOAFS(FILE_INODE(fp));
697 cred_t *credp = crref();
698 struct AFS_FLOCK flock;
699 /* Convert to a lock format afs_lockctl understands. */
700 memset(&flock, 0, sizeof(flock));
701 flock.l_type = flp->fl_type;
702 flock.l_pid = flp->fl_pid;
703 flock.l_whence = 0;
704 flock.l_start = 0;
705 flock.l_len = 0;
706
707 /* Safe because there are no large files, yet */
708 #if defined(F_GETLK64) && (F_GETLK != F_GETLK64)
709 if (cmd == F_GETLK64)
710 cmd = F_GETLK;
711 else if (cmd == F_SETLK64)
712 cmd = F_SETLK;
713 else if (cmd == F_SETLKW64)
714 cmd = F_SETLKW;
715 #endif /* F_GETLK64 && F_GETLK != F_GETLK64 */
716
717 AFS_GLOCK();
718 code = afs_convert_code(afs_lockctl(vcp, &flock, cmd, credp));
719 AFS_GUNLOCK();
720
721 if ((code == 0 || flp->fl_type == F_UNLCK) &&
722 (cmd == F_SETLK || cmd == F_SETLKW)) {
723 flp->fl_flags &=~ FL_SLEEP;
724 code = flock_lock_file_wait(fp, flp);
725 if (code && flp->fl_type != F_UNLCK) {
726 struct AFS_FLOCK flock2;
727 flock2 = flock;
728 flock2.l_type = F_UNLCK;
729 AFS_GLOCK();
730 afs_lockctl(vcp, &flock2, F_SETLK, credp);
731 AFS_GUNLOCK();
732 }
733 }
734 /* Convert flock back to Linux's file_lock */
735 flp->fl_type = flock.l_type;
736 flp->fl_pid = flock.l_pid;
737
738 crfree(credp);
739 return code;
740 }
741 #endif
742
743 /* afs_linux_flush
744 * essentially the same as afs_fsync() but we need to get the return
745 * code for the sys_close() here, not afs_linux_release(), so call
746 * afs_StoreAllSegments() with AFS_LASTSTORE
747 */
748 static int
749 #if defined(FOP_FLUSH_TAKES_FL_OWNER_T)
750 afs_linux_flush(struct file *fp, fl_owner_t id)
751 #else
752 afs_linux_flush(struct file *fp)
753 #endif
754 {
755 struct vrequest *treq = NULL;
756 struct vcache *vcp;
757 cred_t *credp;
758 int code;
759 int bypasscache = 0;
760
761 AFS_GLOCK();
762
763     if ((fp->f_flags & O_ACCMODE) == O_RDONLY) { /* readers don't flush */
764 AFS_GUNLOCK();
765 return 0;
766 }
767
768 AFS_DISCON_LOCK();
769
770 credp = crref();
771 vcp = VTOAFS(FILE_INODE(fp));
772
773 code = afs_CreateReq(&treq, credp);
774 if (code)
775 goto out;
776 /* If caching is bypassed for this file, or globally, just return 0 */
777 if (cache_bypass_strategy == ALWAYS_BYPASS_CACHE)
778 bypasscache = 1;
779 else {
780 ObtainReadLock(&vcp->lock);
781 if (vcp->cachingStates & FCSBypass)
782 bypasscache = 1;
783 ReleaseReadLock(&vcp->lock);
784 }
785 if (bypasscache) {
786 /* future proof: don't rely on 0 return from afs_InitReq */
787 code = 0;
788 goto out;
789 }
790
791 ObtainSharedLock(&vcp->lock, 535);
792 if ((vcp->execsOrWriters > 0) && (file_count(fp) == 1)) {
793 UpgradeSToWLock(&vcp->lock, 536);
794 if (!AFS_IS_DISCONNECTED) {
795 code = afs_StoreAllSegments(vcp,
796 treq,
797 AFS_SYNC | AFS_LASTSTORE);
798 } else {
799 afs_DisconAddDirty(vcp, VDisconWriteOsiFlush, 1);
800 }
801 ConvertWToSLock(&vcp->lock);
802 }
803 code = afs_CheckCode(code, treq, 54);
804 ReleaseSharedLock(&vcp->lock);
805
806 out:
807 afs_DestroyReq(treq);
808 AFS_DISCON_UNLOCK();
809 AFS_GUNLOCK();
810
811 crfree(credp);
812 return afs_convert_code(code);
813 }
814
815 struct file_operations afs_dir_fops = {
816 .read = generic_read_dir,
817 #if defined(USE_FOP_ITERATE)
818 .iterate = afs_linux_readdir,
819 #else
820 .readdir = afs_linux_readdir,
821 #endif
822 #ifdef HAVE_UNLOCKED_IOCTL
823 .unlocked_ioctl = afs_unlocked_xioctl,
824 #else
825 .ioctl = afs_xioctl,
826 #endif
827 #ifdef HAVE_COMPAT_IOCTL
828 .compat_ioctl = afs_unlocked_xioctl,
829 #endif
830 .open = afs_linux_open,
831 .release = afs_linux_release,
832 .llseek = default_llseek,
833 #ifdef HAVE_LINUX_NOOP_FSYNC
834 .fsync = noop_fsync,
835 #else
836 .fsync = simple_sync_file,
837 #endif
838 };
839
840 struct file_operations afs_file_fops = {
841 #ifdef STRUCT_FILE_OPERATIONS_HAS_READ_ITER
842 .read_iter = afs_linux_read_iter,
843 .write_iter = afs_linux_write_iter,
844 # if !defined(HAVE_LINUX___VFS_WRITE) && !defined(HAVE_LINUX_KERNEL_WRITE)
845 .read = new_sync_read,
846 .write = new_sync_write,
847 # endif
848 #elif defined(HAVE_LINUX_GENERIC_FILE_AIO_READ)
849 .aio_read = afs_linux_aio_read,
850 .aio_write = afs_linux_aio_write,
851 .read = do_sync_read,
852 .write = do_sync_write,
853 #else
854 .read = afs_linux_read,
855 .write = afs_linux_write,
856 #endif
857 #ifdef HAVE_UNLOCKED_IOCTL
858 .unlocked_ioctl = afs_unlocked_xioctl,
859 #else
860 .ioctl = afs_xioctl,
861 #endif
862 #ifdef HAVE_COMPAT_IOCTL
863 .compat_ioctl = afs_unlocked_xioctl,
864 #endif
865 .mmap = afs_linux_mmap,
866 .open = afs_linux_open,
867 .flush = afs_linux_flush,
868 #if defined(STRUCT_FILE_OPERATIONS_HAS_SENDFILE)
869 .sendfile = generic_file_sendfile,
870 #endif
871 #if defined(STRUCT_FILE_OPERATIONS_HAS_SPLICE) && !defined(HAVE_LINUX_DEFAULT_FILE_SPLICE_READ)
872 # if defined(HAVE_LINUX_ITER_FILE_SPLICE_WRITE)
873 .splice_write = iter_file_splice_write,
874 # else
875 .splice_write = generic_file_splice_write,
876 # endif
877 .splice_read = generic_file_splice_read,
878 #endif
879 .release = afs_linux_release,
880 .fsync = afs_linux_fsync,
881 .lock = afs_linux_lock,
882 #ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
883 .flock = afs_linux_flock,
884 #endif
885 .llseek = default_llseek,
886 };
887
888 static struct dentry *
889 canonical_dentry(struct inode *ip)
890 {
891 struct vcache *vcp = VTOAFS(ip);
892 struct dentry *first = NULL, *ret = NULL, *cur;
893 #if defined(D_ALIAS_IS_HLIST) && !defined(HLIST_ITERATOR_NO_NODE)
894 struct hlist_node *p;
895 #endif
896
897 /* general strategy:
898 * if vcp->target_link is set, and can be found in ip->i_dentry, use that.
899 * otherwise, use the first dentry in ip->i_dentry.
900 * if ip->i_dentry is empty, use the 'dentry' argument we were given.
901 */
902 /* note that vcp->target_link specifies which dentry to use, but we have
903 * no reference held on that dentry. so, we cannot use or dereference
904 * vcp->target_link itself, since it may have been freed. instead, we only
905 * use it to compare to pointers in the ip->i_dentry list. */
906
907 d_prune_aliases(ip);
908
909 afs_d_alias_lock(ip);
910
911 #if defined(D_ALIAS_IS_HLIST)
912 # if defined(HLIST_ITERATOR_NO_NODE)
913 hlist_for_each_entry(cur, &ip->i_dentry, d_alias) {
914 # else
915 hlist_for_each_entry(cur, p, &ip->i_dentry, d_alias) {
916 # endif
917 #else
918 list_for_each_entry_reverse(cur, &ip->i_dentry, d_alias) {
919 #endif
920
921 if (!vcp->target_link || cur == vcp->target_link) {
922 ret = cur;
923 break;
924 }
925
926 if (!first) {
927 first = cur;
928 }
929 }
930 if (!ret && first) {
931 ret = first;
932 }
933
934 vcp->target_link = ret;
935
936 if (ret) {
937 afs_linux_dget(ret);
938 }
939 afs_d_alias_unlock(ip);
940
941 return ret;
942 }
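/* Note on the contract above (sketch): any dentry returned carries a
 * reference taken via afs_linux_dget(), so the caller must dput() it or
 * transfer it, as afs_dentry_automount() below does when it swaps the
 * returned dentry into the path. */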
943
944 /**********************************************************************
945 * AFS Linux dentry operations
946 **********************************************************************/
947
948 /* afs_linux_revalidate
949 * Ensure vcache is stat'd before use. Return 0 if entry is valid.
950 */
951 static int
952 afs_linux_revalidate(struct dentry *dp)
953 {
954 struct vattr *vattr = NULL;
955 struct vcache *vcp = VTOAFS(dp->d_inode);
956 cred_t *credp;
957 int code;
958
959 if (afs_shuttingdown != AFS_RUNNING)
960 return EIO;
961
962 AFS_GLOCK();
963
964 code = afs_CreateAttr(&vattr);
965 if (code) {
966 goto out;
967 }
968
969 /* This avoids the crref when we don't have to do it. Watch for
970 * changes in afs_getattr that don't get replicated here!
971 */
972 if (vcp->f.states & CStatd &&
973 (!afs_fakestat_enable || vcp->mvstat != AFS_MVSTAT_MTPT) &&
974 !afs_nfsexporter &&
975 (vType(vcp) == VDIR || vType(vcp) == VLNK)) {
976 code = afs_CopyOutAttrs(vcp, vattr);
977 } else {
978 credp = crref();
979 code = afs_getattr(vcp, vattr, credp);
980 crfree(credp);
981 }
982
983 if (!code)
984 afs_fill_inode(AFSTOV(vcp), vattr);
985
986 afs_DestroyAttr(vattr);
987
988 out:
989 AFS_GUNLOCK();
990
991 return afs_convert_code(code);
992 }
993
994 /* iattr2vattr
995 * Set iattr data into vattr. Assume vattr cleared before call.
996 */
997 static void
998 iattr2vattr(struct vattr *vattrp, struct iattr *iattrp)
999 {
1000 vattrp->va_mask = iattrp->ia_valid;
1001 if (iattrp->ia_valid & ATTR_MODE)
1002 vattrp->va_mode = iattrp->ia_mode;
1003 if (iattrp->ia_valid & ATTR_UID)
1004 vattrp->va_uid = afs_from_kuid(iattrp->ia_uid);
1005 if (iattrp->ia_valid & ATTR_GID)
1006 vattrp->va_gid = afs_from_kgid(iattrp->ia_gid);
1007 if (iattrp->ia_valid & ATTR_SIZE)
1008 vattrp->va_size = iattrp->ia_size;
1009 if (iattrp->ia_valid & ATTR_ATIME) {
1010 vattrp->va_atime.tv_sec = iattrp->ia_atime.tv_sec;
1011 vattrp->va_atime.tv_usec = 0;
1012 }
1013 if (iattrp->ia_valid & ATTR_MTIME) {
1014 vattrp->va_mtime.tv_sec = iattrp->ia_mtime.tv_sec;
1015 vattrp->va_mtime.tv_usec = 0;
1016 }
1017 if (iattrp->ia_valid & ATTR_CTIME) {
1018 vattrp->va_ctime.tv_sec = iattrp->ia_ctime.tv_sec;
1019 vattrp->va_ctime.tv_usec = 0;
1020 }
1021 }
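/* Example (illustrative): a plain chmod() arrives with ia_valid == ATTR_MODE,
 * so only va_mode is filled in and va_mask == ATTR_MODE tells afs_setattr()
 * which fields to apply; the remaining vattr fields stay cleared, as the
 * comment above assumes. */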
1022
1023 /* vattr2inode
1024 * Rewrite the inode cache from the attr. Assumes all vattr fields are valid.
1025 */
1026 void
1027 vattr2inode(struct inode *ip, struct vattr *vp)
1028 {
1029 ip->i_ino = vp->va_nodeid;
1030 #ifdef HAVE_LINUX_SET_NLINK
1031 set_nlink(ip, vp->va_nlink);
1032 #else
1033 ip->i_nlink = vp->va_nlink;
1034 #endif
1035 ip->i_blocks = vp->va_blocks;
1036 #ifdef STRUCT_INODE_HAS_I_BLKBITS
1037 ip->i_blkbits = AFS_BLKBITS;
1038 #endif
1039 #ifdef STRUCT_INODE_HAS_I_BLKSIZE
1040 ip->i_blksize = vp->va_blocksize;
1041 #endif
1042 ip->i_rdev = vp->va_rdev;
1043 ip->i_mode = vp->va_mode;
1044 ip->i_uid = afs_make_kuid(vp->va_uid);
1045 ip->i_gid = afs_make_kgid(vp->va_gid);
1046 i_size_write(ip, vp->va_size);
1047 ip->i_atime.tv_sec = vp->va_atime.tv_sec;
1048 ip->i_atime.tv_nsec = 0;
1049 ip->i_mtime.tv_sec = vp->va_mtime.tv_sec;
1050 /* Set the mtime nanoseconds to the sysname generation number.
1051 * This convinces NFS clients that all directories have changed
1052 * any time the sysname list changes.
1053 */
1054 ip->i_mtime.tv_nsec = afs_sysnamegen;
1055 ip->i_ctime.tv_sec = vp->va_ctime.tv_sec;
1056 ip->i_ctime.tv_nsec = 0;
1057 }
1058
1059 /* afs_notify_change
1060 * Linux version of setattr call. What to change is in the iattr struct.
1061 * We need to set bits in both the Linux inode as well as the vcache.
1062 */
1063 static int
1064 afs_notify_change(struct dentry *dp, struct iattr *iattrp)
1065 {
1066 struct vattr *vattr = NULL;
1067 cred_t *credp = crref();
1068 struct inode *ip = dp->d_inode;
1069 int code;
1070
1071 AFS_GLOCK();
1072 code = afs_CreateAttr(&vattr);
1073 if (code) {
1074 goto out;
1075 }
1076
1077 iattr2vattr(vattr, iattrp); /* Convert for AFS vnodeops call. */
1078
1079 code = afs_setattr(VTOAFS(ip), vattr, credp);
1080 if (!code) {
1081 afs_getattr(VTOAFS(ip), vattr, credp);
1082 vattr2inode(ip, vattr);
1083 }
1084 afs_DestroyAttr(vattr);
1085
1086 out:
1087 AFS_GUNLOCK();
1088 crfree(credp);
1089 return afs_convert_code(code);
1090 }
1091
1092 #if defined(IOP_GETATTR_TAKES_PATH_STRUCT)
1093 static int
1094 afs_linux_getattr(const struct path *path, struct kstat *stat, u32 request_mask, unsigned int sync_mode)
1095 {
1096 int err = afs_linux_revalidate(path->dentry);
1097 if (!err) {
1098 generic_fillattr(path->dentry->d_inode, stat);
1099 }
1100 return err;
1101 }
1102 #else
1103 static int
1104 afs_linux_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
1105 {
1106 int err = afs_linux_revalidate(dentry);
1107 if (!err) {
1108 generic_fillattr(dentry->d_inode, stat);
1109 }
1110 return err;
1111 }
1112 #endif
1113
1114 static afs_uint32
1115 parent_vcache_dv(struct inode *inode, cred_t *credp)
1116 {
1117 int free_cred = 0;
1118 struct vcache *pvcp;
1119
1120 /*
1121 * If parent is a mount point and we are using fakestat, we may need
1122 * to look at the fake vcache entry instead of what the vfs is giving
1123 * us. The fake entry is the one with the useful DataVersion.
1124 */
1125 pvcp = VTOAFS(inode);
1126 if (pvcp->mvstat == AFS_MVSTAT_MTPT && afs_fakestat_enable) {
1127 struct vrequest treq;
1128 struct afs_fakestat_state fakestate;
1129
1130 if (!credp) {
1131 credp = crref();
1132 free_cred = 1;
1133 }
1134 afs_InitReq(&treq, credp);
1135 afs_InitFakeStat(&fakestate);
1136 afs_TryEvalFakeStat(&pvcp, &fakestate, &treq);
1137 if (free_cred)
1138 crfree(credp);
1139 afs_PutFakeStat(&fakestate);
1140 }
1141 return hgetlo(pvcp->f.m.DataVersion);
1142 }
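/* How the value is used (sketch): it is cached in dp->d_time when a dentry
 * is instantiated (see afs_linux_lookup/afs_linux_create) and compared
 * against the parent's current DataVersion in afs_linux_dentry_revalidate:
 *
 *     dp->d_time = parent_vcache_dv(dip, credp);    at lookup time
 *     if (parent_dv > dp->d_time || ...)            at revalidate time
 */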
1143
1144 static inline int
1145 filter_enoent(int code)
1146 {
1147 #ifdef HAVE_LINUX_FATAL_SIGNAL_PENDING
1148 if (code == ENOENT && fatal_signal_pending(current)) {
1149 return EINTR;
1150 }
1151 #endif
1152 return code;
1153 }
1154
1155 #ifndef D_SPLICE_ALIAS_RACE
1156
1157 static inline void dentry_race_lock(void) {}
1158 static inline void dentry_race_unlock(void) {}
1159
1160 #else
1161
1162 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
1163 static DEFINE_MUTEX(dentry_race_sem);
1164 # else
1165 static DECLARE_MUTEX(dentry_race_sem);
1166 # endif
1167
1168 static inline void
1169 dentry_race_lock(void)
1170 {
1171 mutex_lock(&dentry_race_sem);
1172 }
1173 static inline void
1174 dentry_race_unlock(void)
1175 {
1176 mutex_unlock(&dentry_race_sem);
1177 }
1178
1179 /* Leave some trace that this code is enabled; otherwise it's pretty hard to
1180 * tell. */
1181 static __attribute__((used)) const char dentry_race_marker[] = "d_splice_alias race workaround enabled";
1182
1183 static int
1184 check_dentry_race(struct dentry *dp)
1185 {
1186 int raced = 0;
1187 if (!dp->d_inode) {
1188 /* In Linux, before commit 4919c5e45a91b5db5a41695fe0357fbdff0d5767,
1189 * d_splice_alias can momentarily hash a dentry before it's fully
1190 * populated. This only happens for a moment, since it's unhashed again
1191 * right after (in d_move), but this can make the dentry be found by
1192 * __d_lookup, and then given to us.
1193 *
1194 * So check if the dentry is unhashed; if it is, then the dentry is not
1195 * valid. We lock dentry_race_lock() to ensure that d_splice_alias is
1196 * no longer running. Locking d_lock is required to check the dentry's
1197 * flags, so lock that, too.
1198 */
1199 dentry_race_lock();
1200 spin_lock(&dp->d_lock);
1201 if (d_unhashed(dp)) {
1202 raced = 1;
1203 }
1204 spin_unlock(&dp->d_lock);
1205 dentry_race_unlock();
1206 }
1207 return raced;
1208 }
1209 #endif /* D_SPLICE_ALIAS_RACE */
1210
1211 /* Validate a dentry. Return 1 if unchanged, 0 if VFS layer should re-evaluate.
1212 * In kernels 2.2.10 and above, we are passed an additional flags var which
1213 * may have either the LOOKUP_FOLLOW OR LOOKUP_DIRECTORY set in which case
1214 * we are advised to follow the entry if it is a link or to make sure that
1215 * it is a directory. But since the kernel itself checks these possibilities
1216 * later on, we shouldn't have to do it until later. Perhaps in the future..
1217 *
1218 * The code here assumes that on entry the global lock is not held
1219 */
1220 static int
1221 #if defined(DOP_REVALIDATE_TAKES_UNSIGNED)
1222 afs_linux_dentry_revalidate(struct dentry *dp, unsigned int flags)
1223 #elif defined(DOP_REVALIDATE_TAKES_NAMEIDATA)
1224 afs_linux_dentry_revalidate(struct dentry *dp, struct nameidata *nd)
1225 #else
1226 afs_linux_dentry_revalidate(struct dentry *dp, int flags)
1227 #endif
1228 {
1229 cred_t *credp = NULL;
1230 struct vcache *vcp, *pvcp, *tvc = NULL;
1231 struct dentry *parent;
1232 int valid;
1233 struct afs_fakestat_state fakestate;
1234 int force_drop = 0;
1235 afs_uint32 parent_dv;
1236
1237 #ifdef LOOKUP_RCU
1238 /* We don't support RCU path walking */
1239 # if defined(DOP_REVALIDATE_TAKES_UNSIGNED)
1240 if (flags & LOOKUP_RCU)
1241 # else
1242 if (nd->flags & LOOKUP_RCU)
1243 # endif
1244 return -ECHILD;
1245 #endif
1246
1247 #ifdef D_SPLICE_ALIAS_RACE
1248 if (check_dentry_race(dp)) {
1249 valid = 0;
1250 return valid;
1251 }
1252 #endif
1253
1254 AFS_GLOCK();
1255 afs_InitFakeStat(&fakestate);
1256
1257 if (dp->d_inode) {
1258 vcp = VTOAFS(dp->d_inode);
1259
1260 if (vcp == afs_globalVp)
1261 goto good_dentry;
1262
1263 if (vcp->mvstat == AFS_MVSTAT_MTPT) {
1264 if (vcp->mvid.target_root && (vcp->f.states & CMValid)) {
1265 int tryEvalOnly = 0;
1266 int code = 0;
1267 struct vrequest *treq = NULL;
1268
1269 credp = crref();
1270
1271 code = afs_CreateReq(&treq, credp);
1272 if (code) {
1273 goto bad_dentry;
1274 }
1275 if ((strcmp(dp->d_name.name, ".directory") == 0)) {
1276 tryEvalOnly = 1;
1277 }
1278 if (tryEvalOnly)
1279 code = afs_TryEvalFakeStat(&vcp, &fakestate, treq);
1280 else
1281 code = afs_EvalFakeStat(&vcp, &fakestate, treq);
1282 afs_DestroyReq(treq);
1283 if ((tryEvalOnly && vcp->mvstat == AFS_MVSTAT_MTPT) || code) {
1284 /* a mount point, not yet replaced by its directory */
1285 goto bad_dentry;
1286 }
1287 }
1288 } else if (vcp->mvstat == AFS_MVSTAT_ROOT && *dp->d_name.name != '/') {
1289 osi_Assert(vcp->mvid.parent != NULL);
1290 }
1291
1292 #ifdef notdef
1293 /* If the last looker changes, we should make sure the current
1294 * looker still has permission to examine this file. This would
1295 * always require a crref() which would be "slow".
1296 */
1297 if (vcp->last_looker != treq.uid) {
1298 if (!afs_AccessOK(vcp, (vType(vcp) == VREG) ? PRSFS_READ : PRSFS_LOOKUP, &treq, CHECK_MODE_BITS)) {
1299 goto bad_dentry;
1300 }
1301
1302 vcp->last_looker = treq.uid;
1303 }
1304 #endif
1305
1306 parent = dget_parent(dp);
1307 pvcp = VTOAFS(parent->d_inode);
1308 parent_dv = parent_vcache_dv(parent->d_inode, credp);
1309
1310 /* If the parent's DataVersion has changed or the vnode
1311      * is no longer valid, we need to do a full lookup. VerifyVCache
1312 * isn't enough since the vnode may have been renamed.
1313 */
1314
1315 if (parent_dv > dp->d_time || !(vcp->f.states & CStatd)) {
1316 struct vattr *vattr = NULL;
1317 int code;
1318 int lookup_good;
1319
1320 if (credp == NULL) {
1321 credp = crref();
1322 }
1323 code = afs_lookup(pvcp, (char *)dp->d_name.name, &tvc, credp);
1324 code = filter_enoent(code);
1325
1326 if (code) {
1327 /* We couldn't perform the lookup, so we're not okay. */
1328 lookup_good = 0;
1329
1330 } else if (tvc == vcp) {
1331 /* We got back the same vcache, so we're good. */
1332 lookup_good = 1;
1333
1334 } else if (tvc == VTOAFS(dp->d_inode)) {
1335 /* We got back the same vcache, so we're good. This is
1336 * different from the above case, because sometimes 'vcp' is
1337 * not the same as the vcache for dp->d_inode, if 'vcp' was a
1338 * mtpt and we evaluated it to a root dir. In rare cases,
1339          * afs_lookup might not evaluate the mtpt when we do, or vice
1340 * versa, so the previous case will not succeed. But this is
1341 * still 'correct', so make sure not to mark the dentry as
1342 * invalid; it still points to the same thing! */
1343 lookup_good = 1;
1344
1345 } else {
1346 /* We got back a different file, so we're definitely not
1347 * okay. */
1348 lookup_good = 0;
1349 }
1350
1351 if (!lookup_good) {
1352 dput(parent);
1353 /* Force unhash; the name doesn't point to this file
1354 * anymore. */
1355 force_drop = 1;
1356 if (code && code != ENOENT) {
1357 /* ...except if we couldn't perform the actual lookup,
1358 * we don't know if the name points to this file or not. */
1359 force_drop = 0;
1360 }
1361 goto bad_dentry;
1362 }
1363
1364 code = afs_CreateAttr(&vattr);
1365 if (code) {
1366 dput(parent);
1367 goto bad_dentry;
1368 }
1369
1370 if (afs_getattr(vcp, vattr, credp)) {
1371 dput(parent);
1372 afs_DestroyAttr(vattr);
1373 goto bad_dentry;
1374 }
1375
1376 vattr2inode(AFSTOV(vcp), vattr);
1377 dp->d_time = parent_dv;
1378
1379 afs_DestroyAttr(vattr);
1380 }
1381
1382 /* should we always update the attributes at this point? */
1383 /* unlikely--the vcache entry hasn't changed */
1384
1385 dput(parent);
1386
1387 } else {
1388
1389 /* 'dp' represents a cached negative lookup. */
1390
1391 parent = dget_parent(dp);
1392 pvcp = VTOAFS(parent->d_inode);
1393 parent_dv = parent_vcache_dv(parent->d_inode, credp);
1394
1395 if (parent_dv > dp->d_time || !(pvcp->f.states & CStatd)
1396 || afs_IsDynroot(pvcp)) {
1397 dput(parent);
1398 goto bad_dentry;
1399 }
1400
1401 dput(parent);
1402 }
1403
1404 good_dentry:
1405 valid = 1;
1406 goto done;
1407
1408 bad_dentry:
1409 valid = 0;
1410 #ifndef D_INVALIDATE_IS_VOID
1411     /* When d_invalidate was converted to void (in v3.18), it also started
1412 * being called automatically from revalidate, and automatically
1413 * handled:
1414 * - shrink_dcache_parent
1415 * - automatic detach of submounts
1416 * - d_drop
1417 * Therefore, after that point, OpenAFS revalidate logic no longer needs
1418 * to do any of those things itself for invalid dentry structs. We only need
1419 * to tell VFS it's invalid (by returning 0), and VFS will handle the rest.
1420 */
1421 if (have_submounts(dp))
1422 valid = 1;
1423 #endif
1424
1425 done:
1426 /* Clean up */
1427 if (tvc)
1428 afs_PutVCache(tvc);
1429 afs_PutFakeStat(&fakestate);
1430 AFS_GUNLOCK();
1431 if (credp)
1432 crfree(credp);
1433
1434 #ifndef D_INVALIDATE_IS_VOID
1435 if (!valid) {
1436 /*
1437 * If we had a negative lookup for the name we want to forcibly
1438 * unhash the dentry.
1439 * Otherwise use d_invalidate which will not unhash it if still in use.
1440 */
1441 if (force_drop) {
1442 shrink_dcache_parent(dp);
1443 d_drop(dp);
1444 } else
1445 d_invalidate(dp);
1446 }
1447 #endif
1448 return valid;
1449
1450 }
1451
1452 static void
1453 afs_dentry_iput(struct dentry *dp, struct inode *ip)
1454 {
1455 struct vcache *vcp = VTOAFS(ip);
1456
1457 AFS_GLOCK();
1458 if (!AFS_IS_DISCONNECTED || (vcp->f.states & CUnlinked)) {
1459 (void) afs_InactiveVCache(vcp, NULL);
1460 }
1461 AFS_GUNLOCK();
1462 afs_linux_clear_nfsfs_renamed(dp);
1463
1464 iput(ip);
1465 }
1466
1467 static int
1468 #if defined(DOP_D_DELETE_TAKES_CONST)
1469 afs_dentry_delete(const struct dentry *dp)
1470 #else
1471 afs_dentry_delete(struct dentry *dp)
1472 #endif
1473 {
1474 if (dp->d_inode && (VTOAFS(dp->d_inode)->f.states & CUnlinked))
1475 return 1; /* bad inode? */
1476
1477 return 0;
1478 }
1479
1480 #ifdef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
1481 static struct vfsmount *
1482 afs_dentry_automount(afs_linux_path_t *path)
1483 {
1484 struct dentry *target;
1485
1486 /*
1487 * Avoid symlink resolution limits when resolving; we cannot contribute to
1488 * an infinite symlink loop.
1489 *
1490 * On newer kernels the field has moved to the private nameidata structure
1491 * so we can't adjust it here. This may cause ELOOP when using a path with
1492 * 40 or more directories that are not already in the dentry cache.
1493 */
1494 #if defined(STRUCT_TASK_STRUCT_HAS_TOTAL_LINK_COUNT)
1495 current->total_link_count--;
1496 #endif
1497
1498 target = canonical_dentry(path->dentry->d_inode);
1499
1500 if (target == path->dentry) {
1501 dput(target);
1502 target = NULL;
1503 }
1504
1505 if (target) {
1506 dput(path->dentry);
1507 path->dentry = target;
1508
1509 } else {
1510 spin_lock(&path->dentry->d_lock);
1511 path->dentry->d_flags &= ~DCACHE_NEED_AUTOMOUNT;
1512 spin_unlock(&path->dentry->d_lock);
1513 }
1514
1515 return NULL;
1516 }
1517 #endif /* STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT */
1518
1519 struct dentry_operations afs_dentry_operations = {
1520 .d_revalidate = afs_linux_dentry_revalidate,
1521 .d_delete = afs_dentry_delete,
1522 .d_iput = afs_dentry_iput,
1523 #ifdef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
1524 .d_automount = afs_dentry_automount,
1525 #endif /* STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT */
1526 };
1527
1528 /**********************************************************************
1529 * AFS Linux inode operations
1530 **********************************************************************/
1531
1532 /* afs_linux_create
1533 *
1534 * Merely need to set enough of vattr to get us through the create. Note
1535 * that the higher level code (open_namei) will take care of any truncation
1536 * explicitly. Exclusive open is also taken care of in open_namei.
1537 *
1538 * name is in kernel space at this point.
1539 */
1540 static int
1541 #if defined(IOP_CREATE_TAKES_BOOL)
1542 afs_linux_create(struct inode *dip, struct dentry *dp, umode_t mode,
1543 bool excl)
1544 #elif defined(IOP_CREATE_TAKES_UMODE_T)
1545 afs_linux_create(struct inode *dip, struct dentry *dp, umode_t mode,
1546 struct nameidata *nd)
1547 #elif defined(IOP_CREATE_TAKES_NAMEIDATA)
1548 afs_linux_create(struct inode *dip, struct dentry *dp, int mode,
1549 struct nameidata *nd)
1550 #else
1551 afs_linux_create(struct inode *dip, struct dentry *dp, int mode)
1552 #endif
1553 {
1554 struct vattr *vattr = NULL;
1555 cred_t *credp = crref();
1556 const char *name = dp->d_name.name;
1557 struct vcache *vcp;
1558 int code;
1559
1560 AFS_GLOCK();
1561
1562 code = afs_CreateAttr(&vattr);
1563 if (code) {
1564 goto out;
1565 }
1566 vattr->va_mode = mode;
1567 vattr->va_type = mode & S_IFMT;
1568
1569 code = afs_create(VTOAFS(dip), (char *)name, vattr, NONEXCL, mode,
1570 &vcp, credp);
1571
1572 if (!code) {
1573 struct inode *ip = AFSTOV(vcp);
1574
1575 afs_getattr(vcp, vattr, credp);
1576 afs_fill_inode(ip, vattr);
1577 insert_inode_hash(ip);
1578 #if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
1579 dp->d_op = &afs_dentry_operations;
1580 #endif
1581 dp->d_time = parent_vcache_dv(dip, credp);
1582 d_instantiate(dp, ip);
1583 }
1584
1585 afs_DestroyAttr(vattr);
1586
1587 out:
1588 AFS_GUNLOCK();
1589
1590 crfree(credp);
1591 return afs_convert_code(code);
1592 }
1593
1594 /* afs_linux_lookup */
1595 static struct dentry *
1596 #if defined(IOP_LOOKUP_TAKES_UNSIGNED)
1597 afs_linux_lookup(struct inode *dip, struct dentry *dp,
1598 unsigned flags)
1599 #elif defined(IOP_LOOKUP_TAKES_NAMEIDATA)
1600 afs_linux_lookup(struct inode *dip, struct dentry *dp,
1601 struct nameidata *nd)
1602 #else
1603 afs_linux_lookup(struct inode *dip, struct dentry *dp)
1604 #endif
1605 {
1606 cred_t *credp = crref();
1607 struct vcache *vcp = NULL;
1608 const char *comp = dp->d_name.name;
1609 struct inode *ip = NULL;
1610 struct dentry *newdp = NULL;
1611 int code;
1612
1613 AFS_GLOCK();
1614
1615 code = afs_lookup(VTOAFS(dip), (char *)comp, &vcp, credp);
1616 code = filter_enoent(code);
1617 if (code == ENOENT) {
1618 /* It's ok for the file to not be found. That's noted by the caller by
1619 * seeing that the dp->d_inode field is NULL (set by d_splice_alias or
1620 * d_add, below). */
1621 code = 0;
1622 osi_Assert(vcp == NULL);
1623 }
1624 if (code) {
1625 AFS_GUNLOCK();
1626 goto done;
1627 }
1628
1629 if (vcp) {
1630 struct vattr *vattr = NULL;
1631 struct vcache *parent_vc = VTOAFS(dip);
1632
1633 if (parent_vc == vcp) {
1634 /* This is possible if the parent dir is a mountpoint to a volume,
1635 * and the dir entry we looked up is a mountpoint to the same
1636 * volume. Linux cannot cope with this, so return an error instead
1637 * of risking a deadlock or panic. */
1638 afs_PutVCache(vcp);
1639 code = EDEADLK;
1640 AFS_GUNLOCK();
1641 goto done;
1642 }
1643
1644 code = afs_CreateAttr(&vattr);
1645 if (code) {
1646 afs_PutVCache(vcp);
1647 AFS_GUNLOCK();
1648 goto done;
1649 }
1650
1651 ip = AFSTOV(vcp);
1652 afs_getattr(vcp, vattr, credp);
1653 afs_fill_inode(ip, vattr);
1654 if (hlist_unhashed(&ip->i_hash))
1655 insert_inode_hash(ip);
1656
1657 afs_DestroyAttr(vattr);
1658 }
1659 #if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
1660 dp->d_op = &afs_dentry_operations;
1661 #endif
1662 dp->d_time = parent_vcache_dv(dip, credp);
1663
1664 AFS_GUNLOCK();
1665
1666 if (ip && S_ISDIR(ip->i_mode)) {
1667 d_prune_aliases(ip);
1668
1669 #ifdef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
1670 /* Only needed if this is a volume root */
1671 if (vcp->mvstat == 2)
1672 ip->i_flags |= S_AUTOMOUNT;
1673 #endif
1674 }
1675 /*
1676 * Take an extra reference so the inode doesn't go away if
1677 * d_splice_alias drops our reference on error.
1678 */
1679 if (ip)
1680 #ifdef HAVE_LINUX_IHOLD
1681 ihold(ip);
1682 #else
1683 igrab(ip);
1684 #endif
1685
1686 dentry_race_lock();
1687 newdp = d_splice_alias(ip, dp);
1688 dentry_race_unlock();
1689
1690 done:
1691 crfree(credp);
1692
1693 if (IS_ERR(newdp)) {
1694 /* d_splice_alias can return an error (EIO) if there is an existing
1695  * connected directory alias for this dentry. Add our dentry
1696  * manually if this happens. */
1697 d_add(dp, ip);
1698
1699 #if defined(D_SPLICE_ALIAS_LEAK_ON_ERROR)
1700 /* Depending on the kernel version, d_splice_alias may or may not drop
1701 * the inode reference on error. If it didn't, do it here. */
1702 iput(ip);
1703 #endif
1704 return NULL;
1705 }
1706
1707 if (code) {
1708 if (ip)
1709 iput(ip);
1710 return ERR_PTR(afs_convert_code(code));
1711 }
1712
1713 iput(ip);
1714 return newdp;
1715 }
1716
1717 static int
1718 afs_linux_link(struct dentry *olddp, struct inode *dip, struct dentry *newdp)
1719 {
1720 int code;
1721 cred_t *credp = crref();
1722 const char *name = newdp->d_name.name;
1723 struct inode *oldip = olddp->d_inode;
1724
1725 /* If afs_link returned the vnode, we could instantiate the
1726 * dentry. Since it's not, we drop this one and do a new lookup.
1727 */
1728 d_drop(newdp);
1729
1730 AFS_GLOCK();
1731 code = afs_link(VTOAFS(oldip), VTOAFS(dip), (char *)name, credp);
1732
1733 AFS_GUNLOCK();
1734 crfree(credp);
1735 return afs_convert_code(code);
1736 }
1737
1738 /* We have to have a Linux specific sillyrename function, because we
1739 * also have to keep the dcache up to date when we're doing a silly
1740 * rename - so we don't want the generic vnodeops doing this behind our
1741 * back.
1742 */
1743
1744 static int
1745 afs_linux_sillyrename(struct inode *dir, struct dentry *dentry,
1746 cred_t *credp)
1747 {
1748 struct vcache *tvc = VTOAFS(dentry->d_inode);
1749 struct dentry *__dp = NULL;
1750 char *__name = NULL;
1751 int code;
1752
1753 if (afs_linux_nfsfs_renamed(dentry))
1754 return EBUSY;
1755
1756 do {
1757 dput(__dp);
1758
1759 AFS_GLOCK();
1760 if (__name)
1761 osi_FreeSmallSpace(__name);
1762 __name = afs_newname();
1763 AFS_GUNLOCK();
1764
1765 __dp = lookup_one_len(__name, dentry->d_parent, strlen(__name));
1766
1767 if (IS_ERR(__dp)) {
1768 osi_FreeSmallSpace(__name);
1769 return EBUSY;
1770 }
1771 } while (__dp->d_inode != NULL);
1772
1773 AFS_GLOCK();
1774 code = afs_rename(VTOAFS(dir), (char *)dentry->d_name.name,
1775 VTOAFS(dir), (char *)__dp->d_name.name,
1776 credp);
1777 if (!code) {
1778 tvc->mvid.silly_name = __name;
1779 crhold(credp);
1780 if (tvc->uncred) {
1781 crfree(tvc->uncred);
1782 }
1783 tvc->uncred = credp;
1784 tvc->f.states |= CUnlinked;
1785 afs_linux_set_nfsfs_renamed(dentry);
1786
1787 __dp->d_time = 0; /* force to revalidate */
1788 d_move(dentry, __dp);
1789 } else {
1790 osi_FreeSmallSpace(__name);
1791 }
1792 AFS_GUNLOCK();
1793
1794 dput(__dp);
1795
1796 return code;
1797 }
1798
1799
1800 static int
1801 afs_linux_unlink(struct inode *dip, struct dentry *dp)
1802 {
1803 int code = EBUSY;
1804 cred_t *credp = crref();
1805 const char *name = dp->d_name.name;
1806 struct vcache *tvc = VTOAFS(dp->d_inode);
1807
1808 if (VREFCOUNT(tvc) > 1 && tvc->opens > 0
1809 && !(tvc->f.states & CUnlinked)) {
1810
1811 code = afs_linux_sillyrename(dip, dp, credp);
1812 } else {
1813 AFS_GLOCK();
1814 code = afs_remove(VTOAFS(dip), (char *)name, credp);
1815 AFS_GUNLOCK();
1816 if (!code)
1817 d_drop(dp);
1818 }
1819
1820 crfree(credp);
1821 return afs_convert_code(code);
1822 }
1823
1824
1825 static int
1826 afs_linux_symlink(struct inode *dip, struct dentry *dp, const char *target)
1827 {
1828 int code;
1829 cred_t *credp = crref();
1830 struct vattr *vattr = NULL;
1831 const char *name = dp->d_name.name;
1832
1833 /* If afs_symlink returned the vnode, we could instantiate the
1834 * dentry. Since it's not, we drop this one and do a new lookup.
1835 */
1836 d_drop(dp);
1837
1838 AFS_GLOCK();
1839 code = afs_CreateAttr(&vattr);
1840 if (code) {
1841 goto out;
1842 }
1843
1844 code = afs_symlink(VTOAFS(dip), (char *)name, vattr, (char *)target, NULL,
1845 credp);
1846 afs_DestroyAttr(vattr);
1847
1848 out:
1849 AFS_GUNLOCK();
1850 crfree(credp);
1851 return afs_convert_code(code);
1852 }
1853
1854 static int
1855 #if defined(IOP_MKDIR_TAKES_UMODE_T)
1856 afs_linux_mkdir(struct inode *dip, struct dentry *dp, umode_t mode)
1857 #else
1858 afs_linux_mkdir(struct inode *dip, struct dentry *dp, int mode)
1859 #endif
1860 {
1861 int code;
1862 cred_t *credp = crref();
1863 struct vcache *tvcp = NULL;
1864 struct vattr *vattr = NULL;
1865 const char *name = dp->d_name.name;
1866
1867 AFS_GLOCK();
1868 code = afs_CreateAttr(&vattr);
1869 if (code) {
1870 goto out;
1871 }
1872
1873 vattr->va_mask = ATTR_MODE;
1874 vattr->va_mode = mode;
1875
1876 code = afs_mkdir(VTOAFS(dip), (char *)name, vattr, &tvcp, credp);
1877
1878 if (tvcp) {
1879 struct inode *ip = AFSTOV(tvcp);
1880
1881 afs_getattr(tvcp, vattr, credp);
1882 afs_fill_inode(ip, vattr);
1883
1884 #if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
1885 dp->d_op = &afs_dentry_operations;
1886 #endif
1887 dp->d_time = parent_vcache_dv(dip, credp);
1888 d_instantiate(dp, ip);
1889 }
1890 afs_DestroyAttr(vattr);
1891
1892 out:
1893 AFS_GUNLOCK();
1894
1895 crfree(credp);
1896 return afs_convert_code(code);
1897 }
1898
1899 static int
1900 afs_linux_rmdir(struct inode *dip, struct dentry *dp)
1901 {
1902 int code;
1903 cred_t *credp = crref();
1904 const char *name = dp->d_name.name;
1905
1906 /* locking kernel conflicts with glock? */
1907
1908 AFS_GLOCK();
1909 code = afs_rmdir(VTOAFS(dip), (char *)name, credp);
1910 AFS_GUNLOCK();
1911
1912 /* Linux likes to see ENOTEMPTY returned from an rmdir() syscall
1913 * that failed because a directory is not empty. So, we map
1914      * EEXIST to ENOTEMPTY on Linux.
1915 */
1916 if (code == EEXIST) {
1917 code = ENOTEMPTY;
1918 }
1919
1920 if (!code) {
1921 d_drop(dp);
1922 }
1923
1924 crfree(credp);
1925 return afs_convert_code(code);
1926 }
1927
1928
1929 static int
1930 afs_linux_rename(struct inode *oldip, struct dentry *olddp,
1931 struct inode *newip, struct dentry *newdp
1932 #ifdef HAVE_LINUX_INODE_OPERATIONS_RENAME_TAKES_FLAGS
1933 , unsigned int flags
1934 #endif
1935 )
1936 {
1937 int code;
1938 cred_t *credp = crref();
1939 const char *oldname = olddp->d_name.name;
1940 const char *newname = newdp->d_name.name;
1941 struct dentry *rehash = NULL;
1942
1943 #ifdef HAVE_LINUX_INODE_OPERATIONS_RENAME_TAKES_FLAGS
1944 if (flags)
1945 return -EINVAL; /* no support for new flags yet */
1946 #endif
1947
1948 /* Prevent any new references during rename operation. */
1949
1950 if (!d_unhashed(newdp)) {
1951 d_drop(newdp);
1952 rehash = newdp;
1953 }
1954
1955 afs_maybe_shrink_dcache(olddp);
1956
1957 AFS_GLOCK();
1958 code = afs_rename(VTOAFS(oldip), (char *)oldname, VTOAFS(newip), (char *)newname, credp);
1959 AFS_GUNLOCK();
1960
1961 if (!code)
1962 olddp->d_time = 0; /* force to revalidate */
1963
1964 if (rehash)
1965 d_rehash(rehash);
1966
1967 crfree(credp);
1968 return afs_convert_code(code);
1969 }
1970
1971
1972 /* afs_linux_ireadlink
1973 * Internal readlink which can return link contents to user or kernel space.
1974 * Note that the buffer is NOT supposed to be null-terminated.
1975 */
1976 static int
1977 afs_linux_ireadlink(struct inode *ip, char *target, int maxlen, uio_seg_t seg)
1978 {
1979 int code;
1980 cred_t *credp = crref();
1981 struct uio tuio;
1982 struct iovec iov;
1983
1984 memset(&tuio, 0, sizeof(tuio));
1985 memset(&iov, 0, sizeof(iov));
1986
1987 setup_uio(&tuio, &iov, target, (afs_offs_t) 0, maxlen, UIO_READ, seg);
1988 code = afs_readlink(VTOAFS(ip), &tuio, credp);
1989 crfree(credp);
1990
1991 if (!code)
1992 return maxlen - tuio.uio_resid;
1993 else
1994 return afs_convert_code(code);
1995 }
1996
1997 #if !defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
1998 /* afs_linux_readlink
1999 * Fill target (which is in user space) with contents of symlink.
2000 */
2001 static int
2002 afs_linux_readlink(struct dentry *dp, char *target, int maxlen)
2003 {
2004 int code;
2005 struct inode *ip = dp->d_inode;
2006
2007 AFS_GLOCK();
2008 code = afs_linux_ireadlink(ip, target, maxlen, AFS_UIOUSER);
2009 AFS_GUNLOCK();
2010 return code;
2011 }
2012
2013
2014 /* afs_linux_follow_link
2015 * a file system dependent link following routine.
2016 */
2017 #if defined(HAVE_LINUX_INODE_OPERATIONS_FOLLOW_LINK_NO_NAMEIDATA)
2018 static const char *afs_linux_follow_link(struct dentry *dentry, void **link_data)
2019 #else
2020 static int afs_linux_follow_link(struct dentry *dentry, struct nameidata *nd)
2021 #endif
2022 {
2023 int code;
2024 char *name;
2025
2026 name = kmalloc(PATH_MAX, GFP_NOFS);
2027 if (!name) {
2028 #if defined(HAVE_LINUX_INODE_OPERATIONS_FOLLOW_LINK_NO_NAMEIDATA)
2029 return ERR_PTR(-EIO);
2030 #else
2031 return -EIO;
2032 #endif
2033 }
2034
2035 AFS_GLOCK();
2036 code = afs_linux_ireadlink(dentry->d_inode, name, PATH_MAX - 1, AFS_UIOSYS);
2037 AFS_GUNLOCK();
2038
2039 if (code < 0) {
2040 #if defined(HAVE_LINUX_INODE_OPERATIONS_FOLLOW_LINK_NO_NAMEIDATA)
2041 return ERR_PTR(code);
2042 #else
2043 return code;
2044 #endif
2045 }
2046
2047 name[code] = '\0';
2048 #if defined(HAVE_LINUX_INODE_OPERATIONS_FOLLOW_LINK_NO_NAMEIDATA)
2049 return *link_data = name;
2050 #else
2051 nd_set_link(nd, name);
2052 return 0;
2053 #endif
2054 }
2055
2056 #if defined(HAVE_LINUX_INODE_OPERATIONS_PUT_LINK_NO_NAMEIDATA)
2057 static void
2058 afs_linux_put_link(struct inode *inode, void *link_data)
2059 {
2060 char *name = link_data;
2061
2062 if (name && !IS_ERR(name))
2063 kfree(name);
2064 }
2065 #else
2066 static void
2067 afs_linux_put_link(struct dentry *dentry, struct nameidata *nd)
2068 {
2069 char *name = nd_get_link(nd);
2070
2071 if (name && !IS_ERR(name))
2072 kfree(name);
2073 }
2074 #endif /* HAVE_LINUX_INODE_OPERATIONS_PUT_LINK_NO_NAMEIDATA */
2075
2076 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
2077
2078 /* Populate a page by filling it from the cache file pointed at by cachefp
2079  * (which contains the indicated chunk).
2080  * If task is NULL, the page copy occurs synchronously, and the routine
2081  * returns with the page still locked. If task is non-NULL, then page copies
2082  * may occur in the background, and the page will be unlocked when it is
2083  * ready for use.
2084 */
2085 static int
2086 afs_linux_read_cache(struct file *cachefp, struct page *page,
2087 int chunk, struct pagevec *lrupv,
2088 struct afs_pagecopy_task *task) {
2089 loff_t offset = page_offset(page);
2090 struct inode *cacheinode = cachefp->f_dentry->d_inode;
2091 struct page *newpage, *cachepage;
2092 struct address_space *cachemapping;
2093 int pageindex;
2094 int code = 0;
2095
2096 cachemapping = cacheinode->i_mapping;
2097 newpage = NULL;
2098 cachepage = NULL;
2099
2100 /* If we're trying to read a page that's past the end of the disk
2101 * cache file, then just return a zeroed page */
2102 if (AFS_CHUNKOFFSET(offset) >= i_size_read(cacheinode)) {
2103 zero_user_segment(page, 0, PAGE_SIZE);
2104 SetPageUptodate(page);
2105 if (task)
2106 unlock_page(page);
2107 return 0;
2108 }
2109
2110 /* From our offset, we now need to work out which page in the disk
2111 * file it corresponds to. This will be fun ... */
2112 pageindex = (offset - AFS_CHUNKTOBASE(chunk)) >> PAGE_SHIFT;
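    /* Worked example (hypothetical numbers, for illustration only): with
     * 4 KiB pages (PAGE_SHIFT == 12), a chunk whose base file offset
     * AFS_CHUNKTOBASE(chunk) is 0x40000 and a page at file offset 0x43000
     * give pageindex = (0x43000 - 0x40000) >> 12 == 3, i.e. the fourth
     * page of the corresponding cache file. */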
2113
2114 while (cachepage == NULL) {
2115 cachepage = find_get_page(cachemapping, pageindex);
2116 if (!cachepage) {
2117 if (!newpage)
2118 newpage = page_cache_alloc(cachemapping);
2119 if (!newpage) {
2120 code = -ENOMEM;
2121 goto out;
2122 }
2123
2124 code = add_to_page_cache(newpage, cachemapping,
2125 pageindex, GFP_KERNEL);
2126 if (code == 0) {
2127 cachepage = newpage;
2128 newpage = NULL;
2129
2130 get_page(cachepage);
2131 if (!pagevec_add(lrupv, cachepage))
2132 __pagevec_lru_add_file(lrupv);
2133
2134 } else {
2135 put_page(newpage);
2136 newpage = NULL;
2137 if (code != -EEXIST)
2138 goto out;
2139 }
2140 } else {
2141 lock_page(cachepage);
2142 }
2143 }
2144
2145 if (!PageUptodate(cachepage)) {
2146 ClearPageError(cachepage);
2147 code = cachemapping->a_ops->readpage(NULL, cachepage);
2148 if (!code && !task) {
2149 wait_on_page_locked(cachepage);
2150 }
2151 } else {
2152 unlock_page(cachepage);
2153 }
2154
2155 if (!code) {
2156 if (PageUptodate(cachepage)) {
2157 copy_highpage(page, cachepage);
2158 flush_dcache_page(page);
2159 SetPageUptodate(page);
2160
2161 if (task)
2162 unlock_page(page);
2163 } else if (task) {
2164 afs_pagecopy_queue_page(task, cachepage, page);
2165 } else {
2166 code = -EIO;
2167 }
2168 }
2169
2170 if (code && task) {
2171 unlock_page(page);
2172 }
2173
2174 out:
2175 if (cachepage)
2176 put_page(cachepage);
2177
2178 return code;
2179 }
2180
2181 static inline int
2182 afs_linux_readpage_fastpath(struct file *fp, struct page *pp, int *codep)
2183 {
2184 loff_t offset = page_offset(pp);
2185 struct inode *ip = FILE_INODE(fp);
2186 struct vcache *avc = VTOAFS(ip);
2187 struct dcache *tdc;
2188 struct file *cacheFp = NULL;
2189 int code;
2190 int dcLocked = 0;
2191 struct pagevec lrupv;
2192
2193 /* Not a UFS cache, don't do anything */
2194 if (cacheDiskType != AFS_FCACHE_TYPE_UFS)
2195 return 0;
2196
2197     /* No readpage (e.g. tmpfs), skip */
2198 if (cachefs_noreadpage)
2199 return 0;
2200
2201     /* Can't do anything if the vcache isn't statd, or if the read
2202 * crosses a chunk boundary.
2203 */
2204 if (!(avc->f.states & CStatd) ||
2205 AFS_CHUNK(offset) != AFS_CHUNK(offset + PAGE_SIZE)) {
2206 return 0;
2207 }
2208
2209 ObtainWriteLock(&avc->lock, 911);
2210
2211 /* XXX - See if hinting actually makes things faster !!! */
2212
2213 /* See if we have a suitable entry already cached */
2214 tdc = avc->dchint;
2215
2216 if (tdc) {
2217 /* We need to lock xdcache, then dcache, to handle situations where
2218 * the hint is on the free list. However, we can't safely do this
2219	 * according to the locking hierarchy. So, use a non-blocking lock.
2220 */
2221 ObtainReadLock(&afs_xdcache);
2222 dcLocked = ( 0 == NBObtainReadLock(&tdc->lock));
2223
2224 if (dcLocked && (tdc->index != NULLIDX)
2225 && !FidCmp(&tdc->f.fid, &avc->f.fid)
2226 && tdc->f.chunk == AFS_CHUNK(offset)
2227 && !(afs_indexFlags[tdc->index] & (IFFree | IFDiscarded))) {
2228 /* Bonus - the hint was correct */
2229 afs_RefDCache(tdc);
2230 } else {
2231	    /* Only destroy the hint if it's actually invalid, not if there's
2232 * just been a locking failure */
2233 if (dcLocked) {
2234 ReleaseReadLock(&tdc->lock);
2235 avc->dchint = NULL;
2236 }
2237
2238 tdc = NULL;
2239 dcLocked = 0;
2240 }
2241 ReleaseReadLock(&afs_xdcache);
2242 }
2243
2244 /* No hint, or hint is no longer valid - see if we can get something
2245 * directly from the dcache
2246 */
2247 if (!tdc)
2248 tdc = afs_FindDCache(avc, offset);
2249
2250 if (!tdc) {
2251 ReleaseWriteLock(&avc->lock);
2252 return 0;
2253 }
2254
2255 if (!dcLocked)
2256 ObtainReadLock(&tdc->lock);
2257
2258 /* Is the dcache we've been given currently up to date */
2259 if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
2260 (tdc->dflags & DFFetching))
2261 goto out;
2262
2263 /* Update our hint for future abuse */
2264 avc->dchint = tdc;
2265
2266 /* Okay, so we've now got a cache file that is up to date */
2267
2268 /* XXX - I suspect we should be locking the inodes before we use them! */
2269 AFS_GUNLOCK();
2270 cacheFp = afs_linux_raw_open(&tdc->f.inode);
2271 osi_Assert(cacheFp);
2272 if (!cacheFp->f_dentry->d_inode->i_mapping->a_ops->readpage) {
2273 cachefs_noreadpage = 1;
2274 AFS_GLOCK();
2275 goto out;
2276 }
2277 #if defined(PAGEVEC_INIT_COLD_ARG)
2278 pagevec_init(&lrupv, 0);
2279 #else
2280 pagevec_init(&lrupv);
2281 #endif
2282
2283 code = afs_linux_read_cache(cacheFp, pp, tdc->f.chunk, &lrupv, NULL);
2284
2285 if (pagevec_count(&lrupv))
2286 __pagevec_lru_add_file(&lrupv);
2287
2288 filp_close(cacheFp, NULL);
2289 AFS_GLOCK();
2290
2291 ReleaseReadLock(&tdc->lock);
2292 ReleaseWriteLock(&avc->lock);
2293 afs_PutDCache(tdc);
2294
2295 *codep = code;
2296 return 1;
2297
2298 out:
2299 ReleaseWriteLock(&avc->lock);
2300 ReleaseReadLock(&tdc->lock);
2301 afs_PutDCache(tdc);
2302 return 0;
2303 }
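/* Return convention for afs_linux_readpage_fastpath() (as used by
 * afs_linux_fillpage() below): a return of 1 means the page was handled via
 * the disk cache and *codep holds the result to hand back to the VFS; a
 * return of 0 means the caller should fall back to the slow path through
 * afs_rdwr(). A minimal sketch of the caller side:
 *
 *	if (afs_linux_readpage_fastpath(fp, pp, &code))
 *		return code;
 *	... slow path ...
 */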
2304
2305 /* afs_linux_readpage
2306 *
2307 * This function is split into two, because prepare_write/begin_write
2308 * require a readpage call which doesn't unlock the resulting page upon
2309 * success.
2310 */
2311 static int
2312 afs_linux_fillpage(struct file *fp, struct page *pp)
2313 {
2314 afs_int32 code;
2315 char *address;
2316 struct uio *auio;
2317 struct iovec *iovecp;
2318 struct inode *ip = FILE_INODE(fp);
2319 afs_int32 cnt = page_count(pp);
2320 struct vcache *avc = VTOAFS(ip);
2321 afs_offs_t offset = page_offset(pp);
2322 cred_t *credp;
2323
2324 AFS_GLOCK();
2325 if (afs_linux_readpage_fastpath(fp, pp, &code)) {
2326 AFS_GUNLOCK();
2327 return code;
2328 }
2329 AFS_GUNLOCK();
2330
2331 credp = crref();
2332 address = kmap(pp);
2333 ClearPageError(pp);
2334
2335 auio = kmalloc(sizeof(struct uio), GFP_NOFS);
2336 iovecp = kmalloc(sizeof(struct iovec), GFP_NOFS);
2337
2338 setup_uio(auio, iovecp, (char *)address, offset, PAGE_SIZE, UIO_READ,
2339 AFS_UIOSYS);
2340
2341 AFS_GLOCK();
2342 AFS_DISCON_LOCK();
2343 afs_Trace4(afs_iclSetp, CM_TRACE_READPAGE, ICL_TYPE_POINTER, ip,
2344 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, cnt, ICL_TYPE_INT32,
2345 99999); /* not a possible code value */
2346
2347 code = afs_rdwr(avc, auio, UIO_READ, 0, credp);
2348
2349 afs_Trace4(afs_iclSetp, CM_TRACE_READPAGE, ICL_TYPE_POINTER, ip,
2350 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, cnt, ICL_TYPE_INT32,
2351 code);
2352 AFS_DISCON_UNLOCK();
2353 AFS_GUNLOCK();
2354 if (!code) {
2355 /* XXX valid for no-cache also? Check last bits of files... :)
2356 * Cognate code goes in afs_NoCacheFetchProc. */
2357 if (auio->uio_resid) /* zero remainder of page */
2358 memset((void *)(address + (PAGE_SIZE - auio->uio_resid)), 0,
2359 auio->uio_resid);
2360
2361 flush_dcache_page(pp);
2362 SetPageUptodate(pp);
2363 } /* !code */
2364
2365 kunmap(pp);
2366
2367 kfree(auio);
2368 kfree(iovecp);
2369
2370 crfree(credp);
2371 return afs_convert_code(code);
2372 }
2373
2374 static int
2375 afs_linux_prefetch(struct file *fp, struct page *pp)
2376 {
2377 int code = 0;
2378 struct vcache *avc = VTOAFS(FILE_INODE(fp));
2379 afs_offs_t offset = page_offset(pp);
2380
2381 if (AFS_CHUNKOFFSET(offset) == 0) {
2382 struct dcache *tdc;
2383 struct vrequest *treq = NULL;
2384 cred_t *credp;
2385
2386 credp = crref();
2387 AFS_GLOCK();
2388 code = afs_CreateReq(&treq, credp);
2389 if (!code && !NBObtainWriteLock(&avc->lock, 534)) {
2390 tdc = afs_FindDCache(avc, offset);
2391 if (tdc) {
2392 if (!(tdc->mflags & DFNextStarted))
2393 afs_PrefetchChunk(avc, tdc, credp, treq);
2394 afs_PutDCache(tdc);
2395 }
2396 ReleaseWriteLock(&avc->lock);
2397 }
2398 afs_DestroyReq(treq);
2399 AFS_GUNLOCK();
2400 crfree(credp);
2401 }
2402 return afs_convert_code(code);
2403
2404 }
2405
2406 static int
2407 afs_linux_bypass_readpages(struct file *fp, struct address_space *mapping,
2408 struct list_head *page_list, unsigned num_pages)
2409 {
2410 afs_int32 page_ix;
2411 struct uio *auio;
2412 afs_offs_t offset;
2413 struct iovec* iovecp;
2414 struct nocache_read_request *ancr;
2415 struct page *pp;
2416 struct pagevec lrupv;
2417 afs_int32 code = 0;
2418
2419 cred_t *credp;
2420 struct inode *ip = FILE_INODE(fp);
2421 struct vcache *avc = VTOAFS(ip);
2422 afs_int32 base_index = 0;
2423 afs_int32 page_count = 0;
2424 afs_int32 isize;
2425
2426 /* background thread must free: iovecp, auio, ancr */
2427 iovecp = osi_Alloc(num_pages * sizeof(struct iovec));
2428
2429 auio = osi_Alloc(sizeof(struct uio));
2430 auio->uio_iov = iovecp;
2431 auio->uio_iovcnt = num_pages;
2432 auio->uio_flag = UIO_READ;
2433 auio->uio_seg = AFS_UIOSYS;
2434 auio->uio_resid = num_pages * PAGE_SIZE;
2435
2436 ancr = osi_Alloc(sizeof(struct nocache_read_request));
2437 ancr->auio = auio;
2438 ancr->offset = auio->uio_offset;
2439 ancr->length = auio->uio_resid;
2440
2441 #if defined(PAGEVEC_INIT_COLD_ARG)
2442 pagevec_init(&lrupv, 0);
2443 #else
2444 pagevec_init(&lrupv);
2445 #endif
2446
2447 for(page_ix = 0; page_ix < num_pages; ++page_ix) {
2448
2449 if(list_empty(page_list))
2450 break;
2451
2452 pp = list_entry(page_list->prev, struct page, lru);
2453 /* If we allocate a page and don't remove it from page_list,
2454 * the page cache gets upset. */
2455 list_del(&pp->lru);
2456 isize = (i_size_read(fp->f_mapping->host) - 1) >> PAGE_SHIFT;
2457 if(pp->index > isize) {
2458 if(PageLocked(pp))
2459 unlock_page(pp);
2460 continue;
2461 }
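	/* Illustration (hypothetical size): for i_size_read() == 10000 with
	 * 4 KiB pages, isize = (10000 - 1) >> 12 == 2, so pages 0..2 cover
	 * the file and any page with index > 2 is skipped above. */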
2462
2463 if(page_ix == 0) {
2464 offset = page_offset(pp);
2465 ancr->offset = auio->uio_offset = offset;
2466 base_index = pp->index;
2467 }
2468 iovecp[page_ix].iov_len = PAGE_SIZE;
2469 code = add_to_page_cache(pp, mapping, pp->index, GFP_KERNEL);
2470 if(base_index != pp->index) {
2471 if(PageLocked(pp))
2472 unlock_page(pp);
2473 put_page(pp);
2474 iovecp[page_ix].iov_base = (void *) 0;
2475 base_index++;
2476 ancr->length -= PAGE_SIZE;
2477 continue;
2478 }
2479 base_index++;
2480 if(code) {
2481 if(PageLocked(pp))
2482 unlock_page(pp);
2483 put_page(pp);
2484 iovecp[page_ix].iov_base = (void *) 0;
2485 } else {
2486 page_count++;
2487 if(!PageLocked(pp)) {
2488 lock_page(pp);
2489 }
2490
2491 /* increment page refcount--our original design assumed
2492 * that locking it would effectively pin it; protect
2493	     * ourselves from the possibility that this assumption is
2494	     * faulty, at low cost (provided we do not fail to
2495 * do the corresponding decref on the other side) */
2496 get_page(pp);
2497
2498 /* save the page for background map */
2499 iovecp[page_ix].iov_base = (void*) pp;
2500
2501 /* and put it on the LRU cache */
2502 if (!pagevec_add(&lrupv, pp))
2503 __pagevec_lru_add_file(&lrupv);
2504 }
2505 }
2506
2507 /* If there were useful pages in the page list, make sure all pages
2508 * are in the LRU cache, then schedule the read */
2509 if(page_count) {
2510 if (pagevec_count(&lrupv))
2511 __pagevec_lru_add_file(&lrupv);
2512 credp = crref();
2513 code = afs_ReadNoCache(avc, ancr, credp);
2514 crfree(credp);
2515 } else {
2516 /* If there is nothing for the background thread to handle,
2517 * it won't be freeing the things that we never gave it */
2518 osi_Free(iovecp, num_pages * sizeof(struct iovec));
2519 osi_Free(auio, sizeof(struct uio));
2520 osi_Free(ancr, sizeof(struct nocache_read_request));
2521 }
2522 /* we do not flush, release, or unmap pages--that will be
2523 * done for us by the background thread as each page comes in
2524 * from the fileserver */
2525 return afs_convert_code(code);
2526 }
2527
2528
2529 static int
2530 afs_linux_bypass_readpage(struct file *fp, struct page *pp)
2531 {
2532 cred_t *credp = NULL;
2533 struct uio *auio;
2534 struct iovec *iovecp;
2535 struct nocache_read_request *ancr;
2536 int code;
2537
2538 /*
2539 * Special case: if page is at or past end of file, just zero it and set
2540 * it as up to date.
2541 */
2542 if (page_offset(pp) >= i_size_read(fp->f_mapping->host)) {
2543 zero_user_segment(pp, 0, PAGE_SIZE);
2544 SetPageUptodate(pp);
2545 unlock_page(pp);
2546 return 0;
2547 }
2548
2549 ClearPageError(pp);
2550
2551 /* receiver frees */
2552 auio = osi_Alloc(sizeof(struct uio));
2553 iovecp = osi_Alloc(sizeof(struct iovec));
2554
2555 /* address can be NULL, because we overwrite it with 'pp', below */
2556 setup_uio(auio, iovecp, NULL, page_offset(pp),
2557 PAGE_SIZE, UIO_READ, AFS_UIOSYS);
2558
2559 /* save the page for background map */
2560 get_page(pp); /* see above */
2561 auio->uio_iov->iov_base = (void*) pp;
2562 /* the background thread will free this */
2563 ancr = osi_Alloc(sizeof(struct nocache_read_request));
2564 ancr->auio = auio;
2565 ancr->offset = page_offset(pp);
2566 ancr->length = PAGE_SIZE;
2567
2568 credp = crref();
2569 code = afs_ReadNoCache(VTOAFS(FILE_INODE(fp)), ancr, credp);
2570 crfree(credp);
2571
2572 return afs_convert_code(code);
2573 }
2574
2575 static inline int
2576 afs_linux_can_bypass(struct inode *ip) {
2577
2578 switch(cache_bypass_strategy) {
2579 case NEVER_BYPASS_CACHE:
2580 return 0;
2581 case ALWAYS_BYPASS_CACHE:
2582 return 1;
2583 case LARGE_FILES_BYPASS_CACHE:
2584 if (i_size_read(ip) > cache_bypass_threshold)
2585 return 1;
2586 default:
2587 return 0;
2588 }
2589 }
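/* For illustration (the threshold value is hypothetical): with
 * cache_bypass_strategy == LARGE_FILES_BYPASS_CACHE and a
 * cache_bypass_threshold of 64 MiB, a 100 MiB file is read around the
 * cache (returns 1), while a 4 KiB file goes through the disk cache
 * (falls through to the default case and returns 0). */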
2590
2591 /* Check if a file is permitted to bypass the cache by policy, and modify
2592 * the cache bypass state recorded for that file */
2593
2594 static inline int
2595 afs_linux_bypass_check(struct inode *ip) {
2596 cred_t* credp;
2597
2598 int bypass = afs_linux_can_bypass(ip);
2599
2600 credp = crref();
2601 trydo_cache_transition(VTOAFS(ip), credp, bypass);
2602 crfree(credp);
2603
2604 return bypass;
2605 }
2606
2607
2608 static int
2609 afs_linux_readpage(struct file *fp, struct page *pp)
2610 {
2611 int code;
2612
2613 if (afs_linux_bypass_check(FILE_INODE(fp))) {
2614 code = afs_linux_bypass_readpage(fp, pp);
2615 } else {
2616 code = afs_linux_fillpage(fp, pp);
2617 if (!code)
2618 code = afs_linux_prefetch(fp, pp);
2619 unlock_page(pp);
2620 }
2621
2622 return code;
2623 }
2624
2625 /* Readpages reads a number of pages for a particular file. We use
2626  * this to optimise the reading, by limiting the number of times we have
2627  * to look up, lock and open vcaches and dcaches.
2628 */
2629
2630 static int
2631 afs_linux_readpages(struct file *fp, struct address_space *mapping,
2632 struct list_head *page_list, unsigned int num_pages)
2633 {
2634 struct inode *inode = mapping->host;
2635 struct vcache *avc = VTOAFS(inode);
2636 struct dcache *tdc;
2637 struct file *cacheFp = NULL;
2638 int code;
2639 unsigned int page_idx;
2640 loff_t offset;
2641 struct pagevec lrupv;
2642 struct afs_pagecopy_task *task;
2643
2644 if (afs_linux_bypass_check(inode))
2645 return afs_linux_bypass_readpages(fp, mapping, page_list, num_pages);
2646
2647 if (cacheDiskType == AFS_FCACHE_TYPE_MEM)
2648 return 0;
2649
2650     /* No readpage (e.g. tmpfs), skip */
2651 if (cachefs_noreadpage)
2652 return 0;
2653
2654 AFS_GLOCK();
2655 if ((code = afs_linux_VerifyVCache(avc, NULL))) {
2656 AFS_GUNLOCK();
2657 return code;
2658 }
2659
2660 ObtainWriteLock(&avc->lock, 912);
2661 AFS_GUNLOCK();
2662
2663 task = afs_pagecopy_init_task();
2664
2665 tdc = NULL;
2666 #if defined(PAGEVEC_INIT_COLD_ARG)
2667 pagevec_init(&lrupv, 0);
2668 #else
2669 pagevec_init(&lrupv);
2670 #endif
2671 for (page_idx = 0; page_idx < num_pages; page_idx++) {
2672 struct page *page = list_entry(page_list->prev, struct page, lru);
2673 list_del(&page->lru);
2674 offset = page_offset(page);
2675
2676 if (tdc && tdc->f.chunk != AFS_CHUNK(offset)) {
2677 AFS_GLOCK();
2678 ReleaseReadLock(&tdc->lock);
2679 afs_PutDCache(tdc);
2680 AFS_GUNLOCK();
2681 tdc = NULL;
2682 if (cacheFp)
2683 filp_close(cacheFp, NULL);
2684 }
2685
2686 if (!tdc) {
2687 AFS_GLOCK();
2688 if ((tdc = afs_FindDCache(avc, offset))) {
2689 ObtainReadLock(&tdc->lock);
2690 if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
2691 (tdc->dflags & DFFetching)) {
2692 ReleaseReadLock(&tdc->lock);
2693 afs_PutDCache(tdc);
2694 tdc = NULL;
2695 }
2696 }
2697 AFS_GUNLOCK();
2698 if (tdc) {
2699 cacheFp = afs_linux_raw_open(&tdc->f.inode);
2700 osi_Assert(cacheFp);
2701 if (!cacheFp->f_dentry->d_inode->i_mapping->a_ops->readpage) {
2702 cachefs_noreadpage = 1;
2703 goto out;
2704 }
2705 }
2706 }
2707
2708 if (tdc && !add_to_page_cache(page, mapping, page->index,
2709 GFP_KERNEL)) {
2710 get_page(page);
2711 if (!pagevec_add(&lrupv, page))
2712 __pagevec_lru_add_file(&lrupv);
2713
2714 afs_linux_read_cache(cacheFp, page, tdc->f.chunk, &lrupv, task);
2715 }
2716 put_page(page);
2717 }
2718 if (pagevec_count(&lrupv))
2719 __pagevec_lru_add_file(&lrupv);
2720
2721 out:
2722 if (tdc)
2723 filp_close(cacheFp, NULL);
2724
2725 afs_pagecopy_put_task(task);
2726
2727 AFS_GLOCK();
2728 if (tdc) {
2729 ReleaseReadLock(&tdc->lock);
2730 afs_PutDCache(tdc);
2731 }
2732
2733 ReleaseWriteLock(&avc->lock);
2734 AFS_GUNLOCK();
2735 return 0;
2736 }
2737
2738 /* Prepare an AFS vcache for writeback. Should be called with the vcache
2739 * locked */
2740 static inline int
2741 afs_linux_prepare_writeback(struct vcache *avc) {
2742 pid_t pid;
2743 struct pagewriter *pw;
2744
2745 pid = MyPidxx2Pid(MyPidxx);
2746 /* Prevent recursion into the writeback code */
2747 spin_lock(&avc->pagewriter_lock);
2748 list_for_each_entry(pw, &avc->pagewriters, link) {
2749 if (pw->writer == pid) {
2750 spin_unlock(&avc->pagewriter_lock);
2751 return AOP_WRITEPAGE_ACTIVATE;
2752 }
2753 }
2754 spin_unlock(&avc->pagewriter_lock);
2755
2756 /* Add ourselves to writer list */
2757 pw = osi_Alloc(sizeof(struct pagewriter));
2758 pw->writer = pid;
2759 spin_lock(&avc->pagewriter_lock);
2760 list_add_tail(&pw->link, &avc->pagewriters);
2761 spin_unlock(&avc->pagewriter_lock);
2762
2763 return 0;
2764 }
2765
2766 static inline int
2767 afs_linux_dopartialwrite(struct vcache *avc, cred_t *credp) {
2768 struct vrequest *treq = NULL;
2769 int code = 0;
2770
2771 if (!afs_CreateReq(&treq, credp)) {
2772 code = afs_DoPartialWrite(avc, treq);
2773 afs_DestroyReq(treq);
2774 }
2775
2776 return afs_convert_code(code);
2777 }
2778
2779 static inline void
2780 afs_linux_complete_writeback(struct vcache *avc) {
2781 struct pagewriter *pw, *store;
2782 pid_t pid;
2783 struct list_head tofree;
2784
2785 INIT_LIST_HEAD(&tofree);
2786 pid = MyPidxx2Pid(MyPidxx);
2787 /* Remove ourselves from writer list */
2788 spin_lock(&avc->pagewriter_lock);
2789 list_for_each_entry_safe(pw, store, &avc->pagewriters, link) {
2790 if (pw->writer == pid) {
2791 list_del(&pw->link);
2792 /* osi_Free may sleep so we need to defer it */
2793 list_add_tail(&pw->link, &tofree);
2794 }
2795 }
2796 spin_unlock(&avc->pagewriter_lock);
2797 list_for_each_entry_safe(pw, store, &tofree, link) {
2798 list_del(&pw->link);
2799 osi_Free(pw, sizeof(struct pagewriter));
2800 }
2801 }
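/* The prepare/complete pair above is expected to bracket each writeback,
 * roughly as afs_linux_writepage() and afs_linux_writepage_sync() below do
 * (a sketch only; lock numbers and error handling are omitted, and
 * prepare_writeback returns AOP_WRITEPAGE_ACTIVATE when this thread is
 * already writing back this vcache):
 *
 *	ObtainWriteLock(&vcp->lock, ...);
 *	code = afs_linux_prepare_writeback(vcp);
 *	ReleaseWriteLock(&vcp->lock);
 *	... write the page(s) out ...
 *	ObtainWriteLock(&vcp->lock, ...);
 *	afs_linux_complete_writeback(vcp);
 *	ReleaseWriteLock(&vcp->lock);
 */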
2802
2803 /* Writeback a given page synchronously. Called with no AFS locks held */
2804 static int
2805 afs_linux_page_writeback(struct inode *ip, struct page *pp,
2806 unsigned long offset, unsigned int count,
2807 cred_t *credp)
2808 {
2809 struct vcache *vcp = VTOAFS(ip);
2810 char *buffer;
2811 afs_offs_t base;
2812 int code = 0;
2813 struct uio tuio;
2814 struct iovec iovec;
2815 int f_flags = 0;
2816
2817 memset(&tuio, 0, sizeof(tuio));
2818 memset(&iovec, 0, sizeof(iovec));
2819
2820 buffer = kmap(pp) + offset;
2821 base = page_offset(pp) + offset;
2822
2823 AFS_GLOCK();
2824 afs_Trace4(afs_iclSetp, CM_TRACE_UPDATEPAGE, ICL_TYPE_POINTER, vcp,
2825 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, page_count(pp),
2826 ICL_TYPE_INT32, 99999);
2827
2828 setup_uio(&tuio, &iovec, buffer, base, count, UIO_WRITE, AFS_UIOSYS);
2829
2830 code = afs_write(vcp, &tuio, f_flags, credp, 0);
2831
2832 i_size_write(ip, vcp->f.m.Length);
2833 ip->i_blocks = ((vcp->f.m.Length + 1023) >> 10) << 1;
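    /* i_blocks is counted in 512-byte units; the expression above rounds
     * the AFS length up to whole KiB and doubles it. For example (numbers
     * are illustrative only), Length == 1500 gives (1500 + 1023) >> 10 == 2
     * KiB, i.e. i_blocks == 4. */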
2834
2835 code = code ? afs_convert_code(code) : count - tuio.uio_resid;
2836
2837 afs_Trace4(afs_iclSetp, CM_TRACE_UPDATEPAGE, ICL_TYPE_POINTER, vcp,
2838 ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, page_count(pp),
2839 ICL_TYPE_INT32, code);
2840
2841 AFS_GUNLOCK();
2842 kunmap(pp);
2843
2844 return code;
2845 }
2846
2847 static int
2848 afs_linux_writepage_sync(struct inode *ip, struct page *pp,
2849 unsigned long offset, unsigned int count)
2850 {
2851 int code;
2852 int code1 = 0;
2853 struct vcache *vcp = VTOAFS(ip);
2854 cred_t *credp;
2855
2856 /* Catch recursive writeback. This occurs if the kernel decides
2857 * writeback is required whilst we are writing to the cache, or
2858      * flushing to the server. When we're running synchronously (as
2859 * opposed to from writepage) we can't actually do anything about
2860 * this case - as we can't return AOP_WRITEPAGE_ACTIVATE to write()
2861 */
2862 AFS_GLOCK();
2863 ObtainWriteLock(&vcp->lock, 532);
2864 afs_linux_prepare_writeback(vcp);
2865 ReleaseWriteLock(&vcp->lock);
2866 AFS_GUNLOCK();
2867
2868 credp = crref();
2869 code = afs_linux_page_writeback(ip, pp, offset, count, credp);
2870
2871 AFS_GLOCK();
2872 ObtainWriteLock(&vcp->lock, 533);
2873 if (code > 0)
2874 code1 = afs_linux_dopartialwrite(vcp, credp);
2875 afs_linux_complete_writeback(vcp);
2876 ReleaseWriteLock(&vcp->lock);
2877 AFS_GUNLOCK();
2878 crfree(credp);
2879
2880 if (code1)
2881 return code1;
2882
2883 return code;
2884 }
2885
2886 static int
2887 #ifdef AOP_WRITEPAGE_TAKES_WRITEBACK_CONTROL
2888 afs_linux_writepage(struct page *pp, struct writeback_control *wbc)
2889 #else
2890 afs_linux_writepage(struct page *pp)
2891 #endif
2892 {
2893 struct address_space *mapping = pp->mapping;
2894 struct inode *inode;
2895 struct vcache *vcp;
2896 cred_t *credp;
2897 unsigned int to = PAGE_SIZE;
2898 loff_t isize;
2899 int code = 0;
2900 int code1 = 0;
2901
2902 get_page(pp);
2903
2904 inode = mapping->host;
2905 vcp = VTOAFS(inode);
2906 isize = i_size_read(inode);
2907
2908 /* Don't defeat an earlier truncate */
2909 if (page_offset(pp) > isize) {
2910 set_page_writeback(pp);
2911 unlock_page(pp);
2912 goto done;
2913 }
2914
2915 AFS_GLOCK();
2916 ObtainWriteLock(&vcp->lock, 537);
2917 code = afs_linux_prepare_writeback(vcp);
2918 if (code == AOP_WRITEPAGE_ACTIVATE) {
2919 /* WRITEPAGE_ACTIVATE is the only return value that permits us
2920 * to return with the page still locked */
2921 ReleaseWriteLock(&vcp->lock);
2922 AFS_GUNLOCK();
2923 return code;
2924 }
2925
2926 /* Grab the creds structure currently held in the vnode, and
2927 * get a reference to it, in case it goes away ... */
2928 credp = vcp->cred;
2929 if (credp)
2930 crhold(credp);
2931 else
2932 credp = crref();
2933 ReleaseWriteLock(&vcp->lock);
2934 AFS_GUNLOCK();
2935
2936 set_page_writeback(pp);
2937
2938 SetPageUptodate(pp);
2939
2940 /* We can unlock the page here, because it's protected by the
2941 * page_writeback flag. This should make us less vulnerable to
2942 * deadlocking in afs_write and afs_DoPartialWrite
2943 */
2944 unlock_page(pp);
2945
2946 /* If this is the final page, then just write the number of bytes that
2947 * are actually in it */
2948 if ((isize - page_offset(pp)) < to )
2949 to = isize - page_offset(pp);
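    /* E.g. (illustrative numbers): for a file of isize 10000 bytes and a
     * page at offset 8192, only the 1808 bytes actually present in the
     * final page are written, rather than a full PAGE_SIZE. */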
2950
2951 code = afs_linux_page_writeback(inode, pp, 0, to, credp);
2952
2953 AFS_GLOCK();
2954 ObtainWriteLock(&vcp->lock, 538);
2955
2956 /* As much as we might like to ignore a file server error here,
2957 * and just try again when we close(), unfortunately StoreAllSegments
2958 * will invalidate our chunks if the server returns a permanent error,
2959 * so we need to at least try and get that error back to the user
2960 */
2961 if (code == to)
2962 code1 = afs_linux_dopartialwrite(vcp, credp);
2963
2964 afs_linux_complete_writeback(vcp);
2965 ReleaseWriteLock(&vcp->lock);
2966 crfree(credp);
2967 AFS_GUNLOCK();
2968
2969 done:
2970 end_page_writeback(pp);
2971 put_page(pp);
2972
2973 if (code1)
2974 return code1;
2975
2976 if (code == to)
2977 return 0;
2978
2979 return code;
2980 }
2981
2982 /* afs_linux_permission
2983 * Check access rights - returns error if can't check or permission denied.
2984 */
2985 static int
2986 #if defined(IOP_PERMISSION_TAKES_FLAGS)
2987 afs_linux_permission(struct inode *ip, int mode, unsigned int flags)
2988 #elif defined(IOP_PERMISSION_TAKES_NAMEIDATA)
2989 afs_linux_permission(struct inode *ip, int mode, struct nameidata *nd)
2990 #else
2991 afs_linux_permission(struct inode *ip, int mode)
2992 #endif
2993 {
2994 int code;
2995 cred_t *credp;
2996 int tmp = 0;
2997
2998 /* Check for RCU path walking */
2999 #if defined(IOP_PERMISSION_TAKES_FLAGS)
3000 if (flags & IPERM_FLAG_RCU)
3001 return -ECHILD;
3002 #elif defined(MAY_NOT_BLOCK)
3003 if (mode & MAY_NOT_BLOCK)
3004 return -ECHILD;
3005 #endif
3006
3007 credp = crref();
3008 AFS_GLOCK();
3009 if (mode & MAY_EXEC)
3010 tmp |= VEXEC;
3011 if (mode & MAY_READ)
3012 tmp |= VREAD;
3013 if (mode & MAY_WRITE)
3014 tmp |= VWRITE;
3015 code = afs_access(VTOAFS(ip), tmp, credp);
3016
3017 AFS_GUNLOCK();
3018 crfree(credp);
3019 return afs_convert_code(code);
3020 }
3021
3022 static int
3023 afs_linux_commit_write(struct file *file, struct page *page, unsigned offset,
3024 unsigned to)
3025 {
3026 int code;
3027 struct inode *inode = FILE_INODE(file);
3028 loff_t pagebase = page_offset(page);
3029
3030 if (i_size_read(inode) < (pagebase + offset))
3031 i_size_write(inode, pagebase + offset);
3032
3033 if (PageChecked(page)) {
3034 SetPageUptodate(page);
3035 ClearPageChecked(page);
3036 }
3037
3038 code = afs_linux_writepage_sync(inode, page, offset, to - offset);
3039
3040 return code;
3041 }
3042
3043 static int
3044 afs_linux_prepare_write(struct file *file, struct page *page, unsigned from,
3045 unsigned to)
3046 {
3047
3048 /* http://kerneltrap.org/node/4941 details the expected behaviour of
3049 * prepare_write. Essentially, if the page exists within the file,
3050 * and is not being fully written, then we should populate it.
3051 */
3052
3053 if (!PageUptodate(page)) {
3054 loff_t pagebase = page_offset(page);
3055 loff_t isize = i_size_read(page->mapping->host);
3056
3057 /* Is the location we are writing to beyond the end of the file? */
3058 if (pagebase >= isize ||
3059 ((from == 0) && (pagebase + to) >= isize)) {
3060 zero_user_segments(page, 0, from, to, PAGE_SIZE);
3061 SetPageChecked(page);
3062	/* Are we writing a full page? */
3063 } else if (from == 0 && to == PAGE_SIZE) {
3064 SetPageChecked(page);
3065	/* Is the page readable? If it's write-only, we don't care, because
3066	 * we're not actually going to read from it ... */
3067	} else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
3068 /* We don't care if fillpage fails, because if it does the page
3069 * won't be marked as up to date
3070 */
3071 afs_linux_fillpage(file, page);
3072 }
3073 }
3074 return 0;
3075 }
3076
3077 #if defined(STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_WRITE_BEGIN)
3078 static int
3079 afs_linux_write_end(struct file *file, struct address_space *mapping,
3080 loff_t pos, unsigned len, unsigned copied,
3081 struct page *page, void *fsdata)
3082 {
3083 int code;
3084 unsigned int from = pos & (PAGE_SIZE - 1);
3085
3086 code = afs_linux_commit_write(file, page, from, from + copied);
3087
3088 unlock_page(page);
3089 put_page(page);
3090 return code;
3091 }
3092
3093 static int
3094 afs_linux_write_begin(struct file *file, struct address_space *mapping,
3095 loff_t pos, unsigned len, unsigned flags,
3096 struct page **pagep, void **fsdata)
3097 {
3098 struct page *page;
3099 pgoff_t index = pos >> PAGE_SHIFT;
3100 unsigned int from = pos & (PAGE_SIZE - 1);
3101 int code;
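    /* Offset arithmetic, for illustration (hypothetical values): with 4 KiB
     * pages, pos == 0x2345 gives index == 2 and from == 0x345, so the write
     * begins 0x345 bytes into the third page of the file and
     * afs_linux_prepare_write() is asked to prepare bytes [from, from + len). */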
3102
3103 page = grab_cache_page_write_begin(mapping, index, flags);
3104 if (!page) {
3105 return -ENOMEM;
3106 }
3107
3108 *pagep = page;
3109
3110 code = afs_linux_prepare_write(file, page, from, from + len);
3111 if (code) {
3112 unlock_page(page);
3113 put_page(page);
3114 }
3115
3116 return code;
3117 }
3118 #endif
3119
3120 #ifndef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
3121 static void *
3122 afs_linux_dir_follow_link(struct dentry *dentry, struct nameidata *nd)
3123 {
3124 struct dentry **dpp;
3125 struct dentry *target;
3126
3127 if (current->total_link_count > 0) {
3128 /* avoid symlink resolution limits when resolving; we cannot contribute to
3129 * an infinite symlink loop */
3130 /* only do this for follow_link when total_link_count is positive to be
3131 * on the safe side; there is at least one code path in the Linux
3132 * kernel where it seems like it may be possible to get here without
3133 * total_link_count getting incremented. it is not clear on how that
3134 * path is actually reached, but guard against it just to be safe */
3135 current->total_link_count--;
3136 }
3137
3138 target = canonical_dentry(dentry->d_inode);
3139
3140 # ifdef STRUCT_NAMEIDATA_HAS_PATH
3141 dpp = &nd->path.dentry;
3142 # else
3143 dpp = &nd->dentry;
3144 # endif
3145
3146 dput(*dpp);
3147
3148 if (target) {
3149 *dpp = target;
3150 } else {
3151 *dpp = dget(dentry);
3152 }
3153
3154 nd->last_type = LAST_BIND;
3155
3156 return NULL;
3157 }
3158 #endif /* !STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT */
3159
3160
3161 static struct inode_operations afs_file_iops = {
3162 .permission = afs_linux_permission,
3163 .getattr = afs_linux_getattr,
3164 .setattr = afs_notify_change,
3165 };
3166
3167 static struct address_space_operations afs_file_aops = {
3168 .readpage = afs_linux_readpage,
3169 .readpages = afs_linux_readpages,
3170 .writepage = afs_linux_writepage,
3171 #if defined (STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_WRITE_BEGIN)
3172 .write_begin = afs_linux_write_begin,
3173 .write_end = afs_linux_write_end,
3174 #else
3175 .commit_write = afs_linux_commit_write,
3176 .prepare_write = afs_linux_prepare_write,
3177 #endif
3178 };
3179
3180
3181 /* Separate ops vector for directories. Linux 2.2 tests type of inode
3182 * by what sort of operation is allowed.....
3183 */
3184
3185 static struct inode_operations afs_dir_iops = {
3186 .setattr = afs_notify_change,
3187 .create = afs_linux_create,
3188 .lookup = afs_linux_lookup,
3189 .link = afs_linux_link,
3190 .unlink = afs_linux_unlink,
3191 .symlink = afs_linux_symlink,
3192 .mkdir = afs_linux_mkdir,
3193 .rmdir = afs_linux_rmdir,
3194 .rename = afs_linux_rename,
3195 .getattr = afs_linux_getattr,
3196 .permission = afs_linux_permission,
3197 #ifndef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
3198 .follow_link = afs_linux_dir_follow_link,
3199 #endif
3200 };
3201
3202 /* We really need a separate symlink set of ops, since do_follow_link()
3203 * determines if it _is_ a link by checking if the follow_link op is set.
3204 */
3205 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
3206 static int
3207 afs_symlink_filler(struct file *file, struct page *page)
3208 {
3209 struct inode *ip = (struct inode *)page->mapping->host;
3210 char *p = (char *)kmap(page);
3211 int code;
3212
3213 AFS_GLOCK();
3214 code = afs_linux_ireadlink(ip, p, PAGE_SIZE, AFS_UIOSYS);
3215 AFS_GUNLOCK();
3216
3217 if (code < 0)
3218 goto fail;
3219 p[code] = '\0'; /* null terminate? */
3220
3221 SetPageUptodate(page);
3222 kunmap(page);
3223 unlock_page(page);
3224 return 0;
3225
3226 fail:
3227 SetPageError(page);
3228 kunmap(page);
3229 unlock_page(page);
3230 return code;
3231 }
3232
3233 static struct address_space_operations afs_symlink_aops = {
3234 .readpage = afs_symlink_filler
3235 };
3236 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
3237
3238 static struct inode_operations afs_symlink_iops = {
3239 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
3240 .readlink = page_readlink,
3241 # if defined(HAVE_LINUX_PAGE_GET_LINK)
3242 .get_link = page_get_link,
3243 # elif defined(HAVE_LINUX_PAGE_FOLLOW_LINK)
3244 .follow_link = page_follow_link,
3245 # else
3246 .follow_link = page_follow_link_light,
3247 .put_link = page_put_link,
3248 # endif
3249 #else /* !defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE) */
3250 .readlink = afs_linux_readlink,
3251 .follow_link = afs_linux_follow_link,
3252 .put_link = afs_linux_put_link,
3253 #endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
3254 .setattr = afs_notify_change,
3255 };
3256
3257 void
3258 afs_fill_inode(struct inode *ip, struct vattr *vattr)
3259 {
3260 if (vattr)
3261 vattr2inode(ip, vattr);
3262
3263 #ifdef STRUCT_ADDRESS_SPACE_HAS_BACKING_DEV_INFO
3264 ip->i_mapping->backing_dev_info = afs_backing_dev_info;
3265 #endif
3266     /* Set the inode ops according to the file type (regular file, directory or symlink). */
3267 if (S_ISREG(ip->i_mode)) {
3268 ip->i_op = &afs_file_iops;
3269 ip->i_fop = &afs_file_fops;
3270 ip->i_data.a_ops = &afs_file_aops;
3271
3272 } else if (S_ISDIR(ip->i_mode)) {
3273 ip->i_op = &afs_dir_iops;
3274 ip->i_fop = &afs_dir_fops;
3275
3276 } else if (S_ISLNK(ip->i_mode)) {
3277 ip->i_op = &afs_symlink_iops;
3278 #if defined(HAVE_LINUX_INODE_NOHIGHMEM)
3279 inode_nohighmem(ip);
3280 #endif
3281 #if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
3282 ip->i_data.a_ops = &afs_symlink_aops;
3283 ip->i_mapping = &ip->i_data;
3284 #endif
3285 }
3286
3287 }