Import Upstream version 1.8.5
[hcoop/debian/openafs.git] / src / afs / AIX / osi_vnodeops.c
1 /*
2 * Copyright 2000, International Business Machines Corporation and others.
3 * All Rights Reserved.
4 *
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
8 */
9
10 #include <afsconfig.h>
11 #include "afs/param.h"
12
13
14 #include "h/systm.h"
15 #include "h/types.h"
16 #include "h/errno.h"
17 #include "h/stat.h"
18 #include "h/user.h"
19 #include "h/uio.h"
20 #include "h/vattr.h"
21 #include "h/file.h"
22 #include "h/vfs.h"
23 #include "h/chownx.h"
24 #include "h/systm.h"
25 #include "h/access.h"
26 #ifdef AFS_AIX51_ENV
27 #include "h/acl.h"
28 #endif
29 #include "rpc/types.h"
30 #include "osi_vfs.h"
31 #include "netinet/in.h"
32 #include "h/mbuf.h"
33 #include "h/vmuser.h"
34 #include "h/shm.h"
35 #include "rpc/xdr.h"
36
37 #include "afs/stds.h"
38 #include "afs/afs_osi.h"
39 #define RFTP_INTERNALS 1
40 #include "afs/volerrors.h"
41 #include "afsint.h"
42 #include "vldbint.h"
43 #include "afs/lock.h"
44 #include "afs/exporter.h"
45 #include "afs/afs.h"
46 #include "afs/afs_chunkops.h"
47 #include "afs/afs_stats.h"
48 #include "afs/nfsclient.h"
49 #include "afs/icl.h"
50 #include "afs/prs_fs.h"
51 #include "h/flock.h"
52 #include "afsincludes.h"
53
54
55 int
56 afs_gn_link(struct vnode *vp,
57 struct vnode *dp,
58 char *name,
59 struct ucred *cred)
60 {
61 int error;
62
63 AFS_STATCNT(afs_gn_link);
64 error = afs_link(vp, dp, name, cred);
65 afs_Trace3(afs_iclSetp, CM_TRACE_GNLINK, ICL_TYPE_POINTER, vp,
66 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
67 return (error);
68 }
69
70
71 int
72 afs_gn_mkdir(struct vnode *dp,
73 char *name,
74 int32long64_t Mode,
75 struct ucred *cred)
76 {
77 struct vattr va;
78 struct vnode *vp;
79 int error;
80 int mode = Mode;
81
82 AFS_STATCNT(afs_gn_mkdir);
83 VATTR_NULL(&va);
84 va.va_type = VDIR;
85 va.va_mode = (mode & 07777) & ~get_umask();
86 error = afs_mkdir(dp, name, &va, &vp, cred);
87 if (!error) {
88 AFS_RELE(vp);
89 }
90 afs_Trace4(afs_iclSetp, CM_TRACE_GMKDIR, ICL_TYPE_POINTER, vp,
91 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG,
92 error);
93 return (error);
94 }
95
96
97 int
98 afs_gn_mknod(struct vnode *dp,
99 char *name,
100 int32long64_t Mode,
101 dev_t dev,
102 struct ucred *cred)
103 {
104 struct vattr va;
105 struct vnode *vp;
106 int error;
107 int mode = Mode;
108
109 AFS_STATCNT(afs_gn_mknod);
110 VATTR_NULL(&va);
111 va.va_type = IFTOVT(mode);
112 va.va_mode = (mode & 07777) & ~get_umask();
113
114 /**** I'm not sure if suser() should stay here since it makes no sense in AFS; however the documentation says that one "should be super-user unless making a FIFO file. Others systems such as SUN do this checking in the early stages of mknod (before the abstraction), so it's equivalently the same! *****/
115 if (va.va_type != VFIFO && !suser((char *)&error))
116 return (EPERM);
117 switch (va.va_type) {
118 case VDIR:
119 error = afs_mkdir(dp, name, &va, &vp, cred);
120 break;
121 case VNON:
122 error = EINVAL;
123 break;
124 case VBAD:
125 case VCHR:
126 case VBLK:
127 va.va_rdev = dev;
128 default:
129 error = afs_create(VTOAFS(dp), name, &va, NONEXCL, mode, (struct vcache **)&vp, cred);
130 }
131 if (!error) {
132 AFS_RELE(vp);
133 }
134 afs_Trace4(afs_iclSetp, CM_TRACE_GMKNOD, ICL_TYPE_POINTER, (afs_int32) vp,
135 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG,
136 error);
137 return (error);
138 }
139
140
141 int
142 afs_gn_remove(struct vnode *vp, /* Ignored in AFS */
143 struct vnode * dp,
144 char *name,
145 struct ucred *cred)
146 {
147 int error;
148
149 AFS_STATCNT(afs_gn_remove);
150 error = afs_remove(dp, name, cred);
151 afs_Trace3(afs_iclSetp, CM_TRACE_GREMOVE, ICL_TYPE_POINTER, dp,
152 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
153 return (error);
154 }
155
156
157 int
158 afs_gn_rename(struct vnode *vp, /* Ignored in AFS */
159 struct vnode *dp,
160 char *name,
161 struct vnode *tp, /* Ignored in AFS */
162 struct vnode *tdp,
163 char *tname,
164 struct ucred *cred)
165 {
166 int error;
167
168 AFS_STATCNT(afs_gn_rename);
169 error = afs_rename(dp, name, tdp, tname, cred);
170 afs_Trace4(afs_iclSetp, CM_TRACE_GRENAME, ICL_TYPE_POINTER, dp,
171 ICL_TYPE_STRING, name, ICL_TYPE_STRING, tname, ICL_TYPE_LONG,
172 error);
173 return (error);
174 }
175
176
177 int
178 afs_gn_rmdir(struct vnode *vp, /* Ignored in AFS */
179 struct vnode *dp,
180 char *name,
181 struct ucred *cred)
182 {
183 int error;
184
185 AFS_STATCNT(afs_gn_rmdir);
186 error = afs_rmdir(dp, name, cred);
187 if (error) {
188 if (error == 66 /* 4.3's ENOTEMPTY */ )
189 error = EEXIST; /* AIX returns EEXIST where 4.3 used ENOTEMPTY */
190 }
191 afs_Trace3(afs_iclSetp, CM_TRACE_GRMDIR, ICL_TYPE_POINTER, dp,
192 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
193 return (error);
194 }
195
196
197 int
198 afs_gn_lookup(struct vnode *dp,
199 struct vnode **vpp,
200 char *name,
201 int32long64_t Flags, /* includes FOLLOW... */
202 struct vattr *vattrp,
203 struct ucred *cred)
204 {
205 int error;
206 int flags = Flags;
207
208 AFS_STATCNT(afs_gn_lookup);
209 error = afs_lookup(dp, name, vpp, cred);
210 afs_Trace3(afs_iclSetp, CM_TRACE_GLOOKUP, ICL_TYPE_POINTER, dp,
211 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
212 if (vattrp != NULL && error == 0)
213 afs_gn_getattr(*vpp, vattrp, cred);
214 return (error);
215 }
216
217
218 int
219 afs_gn_fid(struct vnode *vp,
220 struct fid *fidp,
221 struct ucred *cred)
222 {
223 int error;
224
225 AFS_STATCNT(afs_gn_fid);
226 error = afs_fid(vp, fidp);
227 afs_Trace3(afs_iclSetp, CM_TRACE_GFID, ICL_TYPE_POINTER, vp,
228 ICL_TYPE_LONG, (afs_int32) fidp, ICL_TYPE_LONG, error);
229 return (error);
230 }
231
232
/*
 * VOP_OPEN for the AFS gnode layer.
 *
 * Builds an AFS access mask from the AIX open flags, honors FNSHARE
 * (exclusive "no-share" opens, optionally waiting under FDELAY), checks
 * access, then calls afs_open().  FTRUNC is implemented here as a
 * size-0 setattr after the open succeeds.  On success the caller's
 * credentials are stored through *vinfop for later per-fp use.
 */
int
afs_gn_open(struct vnode *vp,
	    int32long64_t Flags,
	    ext_t ext,
	    struct ucred **vinfop,
	    struct ucred *cred)
{
    int error;
    struct vattr va;
    struct vcache *tvp = VTOAFS(vp);
    afs_int32 modes;
    int flags = Flags;

    AFS_STATCNT(afs_gn_open);
    modes = 0;
    /* Map AIX open flags to AFS access bits; FTRUNC implies write. */
    if ((flags & FREAD))
	modes |= R_ACC;
    if ((flags & FEXEC))
	modes |= X_ACC;
    if ((flags & FWRITE) || (flags & FTRUNC))
	modes |= W_ACC;

    /* FNSHARE open must wait until no other opens remain (or fail
     * immediately with ETXTBSY unless FDELAY allows sleeping). */
    while ((flags & FNSHARE) && tvp->opens) {
	if (!(flags & FDELAY)) {
	    error = ETXTBSY;
	    goto abort;
	}
	afs_osi_Sleep(&tvp->opens);
    }

    error = afs_access(VTOAFS(vp), modes, cred);
    if (error) {
	goto abort;
    }

    error = afs_open((struct vcache **) &vp, flags, cred);
    if (!error) {
	if (flags & FTRUNC) {
	    /* Truncate-on-open: zero the length via setattr. */
	    VATTR_NULL(&va);
	    va.va_size = 0;
	    error = afs_setattr(VTOAFS(vp), &va, cred);
	}

	if (flags & FNSHARE)
	    tvp->f.states |= CNSHARE;

	if (!error) {
	    *vinfop = cred;	/* fp->f_vinfo is like fp->f_cred in suns */
	} else {
	    /* an error occurred; we've told CM that the file
	     * is open, so close it now so that open and
	     * writer counts are correct. Ignore error code,
	     * as it is likely to fail (the setattr just did).
	     */
	    afs_close(vp, flags, cred);
	}
    }

  abort:
    afs_Trace3(afs_iclSetp, CM_TRACE_GOPEN, ICL_TYPE_POINTER, vp,
	       ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
    return (error);
}
296
297
/*
 * VOP_CREATE for the AFS gnode layer: create (and usually open) regular
 * file `name' in directory `dp'.  *vinfop receives the caller's creds
 * for use as fp->f_vinfo (like fp->f_cred on Sun-style kernels).
 *
 * NOTE(review): the access-mode bits computed into `modes' are passed
 * to afs_create() in the position other callers use for the file mode;
 * the real mode travels in va.va_mode — presumably intentional here,
 * but worth confirming against afs_create()'s contract.
 */
int
afs_gn_create(struct vnode *dp,
	      struct vnode **vpp,
	      int32long64_t Flags,
	      char *name,
	      int32long64_t Mode,
	      struct ucred **vinfop,	/* return ptr for fp->f_vinfo, used as fp->f_cred */
	      struct ucred *cred)

{
    struct vattr va;
    enum vcexcl exclusive;
    int error, modes = 0;
    int flags = Flags;
    int mode = Mode;

    AFS_STATCNT(afs_gn_create);
    /* O_EXCL|O_CREAT means the create must not find an existing file. */
    if ((flags & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))
	exclusive = EXCL;
    else
	exclusive = NONEXCL;
    VATTR_NULL(&va);
    va.va_type = VREG;
    va.va_mode = (mode & 07777) & ~get_umask();
    if ((flags & FREAD))
	modes |= R_ACC;
    if ((flags & FEXEC))
	modes |= X_ACC;
    if ((flags & FWRITE) || (flags & FTRUNC))
	modes |= W_ACC;
    error = afs_create(VTOAFS(dp), name, &va, exclusive, modes, (struct vcache **)vpp, cred);
    if (error) {
	return error;
    }
    /* 'cr_luid' is a flag (when it comes thru the NFS server it's set to
     * RMTUSER_REQ) that determines if we should call afs_open(). We shouldn't
     * call it when this NFS traffic since the close will never happen thus
     * we'd never flush the files out to the server! Gross but the simplest
     * solution we came out with */
    if (cred->cr_luid != RMTUSER_REQ) {
	/* Honor FNSHARE semantics just like afs_gn_open() does. */
	while ((flags & FNSHARE) && VTOAFS(*vpp)->opens) {
	    if (!(flags & FDELAY))
		return ETXTBSY;
	    afs_osi_Sleep(&VTOAFS(*vpp)->opens);
	}
	/* Since in the standard copen() for bsd vnode kernels they do an
	 * vop_open after the vop_create, we must do the open here since there
	 * are stuff in afs_open that we need. For example advance the
	 * execsOrWriters flag (else we'll be treated as the sun's "core"
	 * case). */
	*vinfop = cred;		/* save user creds in fp->f_vinfo */
	error = afs_open((struct vcache **)vpp, flags, cred);
    }
    afs_Trace4(afs_iclSetp, CM_TRACE_GCREATE, ICL_TYPE_POINTER, dp,
	       ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG,
	       error);
    return error;
}
356
357
358 int
359 afs_gn_hold(struct vnode *vp)
360 {
361 AFS_STATCNT(afs_gn_hold);
362 ++(vp->v_count);
363 return (0);
364 }
365
/* Count of vcaches currently flagged CPageHog (large core-dump files). */
int vmPageHog = 0;

/*
 * VOP_RELE for the AFS gnode layer: drop one reference on `vp'.  When
 * the last reference goes away, clear any page-hog accounting and let
 * afs_inactive() do the real teardown.  Panics on a zero refcount.
 */
int
afs_gn_rele(struct vnode *vp)
{
    struct vcache *vcp = VTOAFS(vp);
    int error = 0;

    AFS_STATCNT(afs_gn_rele);
    if (vp->v_count == 0)
	osi_Panic("afs_rele: zero v_count");
    if (--(vp->v_count) == 0) {
	/* Last reference: undo the page-hog bookkeeping if set. */
	if (vcp->f.states & CPageHog) {
	    vmPageHog--;
	    vcp->f.states &= ~CPageHog;
	}
	error = afs_inactive(vp, 0);
    }
    return (error);
}
386
387
388 int
389 afs_gn_close(struct vnode *vp,
390 int32long64_t Flags,
391 caddr_t vinfo, /* Ignored in AFS */
392 struct ucred *cred)
393 {
394 int error;
395 struct vcache *tvp = VTOAFS(vp);
396 int flags = Flags;
397
398 AFS_STATCNT(afs_gn_close);
399
400 if (flags & FNSHARE) {
401 tvp->f.states &= ~CNSHARE;
402 afs_osi_Wakeup(&tvp->opens);
403 }
404
405 error = afs_close(vp, flags, cred);
406 afs_Trace3(afs_iclSetp, CM_TRACE_GCLOSE, ICL_TYPE_POINTER, (afs_int32) vp,
407 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
408 return (error);
409 }
410
411
/*
 * VOP_MAP for the AFS gnode layer: prepare `vp' for memory mapping.
 *
 * Creates the file's VM segment on first use (vms_create), records the
 * segment id in the gnode, bumps the read-only or read/write mapping
 * count, and stashes the caller's credentials on the vcache so async
 * daemons can page on the caller's behalf later.  Takes an extra vnode
 * reference which afs_gn_unmap() releases.
 */
int
afs_gn_map(struct vnode *vp,
	   caddr_t addr,
	   uint32long64_t Len,
	   uint32long64_t Off,
	   uint32long64_t Flag,
	   struct ucred *cred)
{
    struct vcache *vcp = VTOAFS(vp);
    struct vrequest treq;
    afs_int32 error;
    afs_int32 len = Len;
    afs_int32 off = Off;
    afs_int32 flag = Flag;

    AFS_STATCNT(afs_gn_map);
#ifdef notdef
    if (error = afs_InitReq(&treq, cred))
	return error;
    error = afs_VerifyVCache(vcp, &treq);
    if (error)
	return afs_CheckCode(error, &treq, 49);
#endif
    osi_FlushPages(vcp, cred);	/* XXX ensure old pages are gone XXX */
    ObtainWriteLock(&vcp->lock, 401);
    vcp->f.states |= CMAPPED;	/* flag cleared at afs_inactive */
    /*
     * We map the segment into our address space using the handle returned by vm_create.
     */
    if (!vcp->segid) {
	/* No VM segment yet; create one sized to the cached length
	 * (capped at the VM mapping limit on 64-bit clients). */
	afs_uint32 tlen = vcp->f.m.Length;
#ifdef AFS_64BIT_CLIENT
	if (vcp->f.m.Length > afs_vmMappingEnd)
	    tlen = afs_vmMappingEnd;
#endif
	/* Consider V_INTRSEG too for interrupts */
	if (error =
	    vms_create(&vcp->segid, V_CLIENT, (dev_t) vcp->v.v_gnode, tlen, 0, 0)) {
	    ReleaseWriteLock(&vcp->lock);
	    return (EOPNOTSUPP);
	}
#ifdef AFS_64BIT_KERNEL
	vcp->vmh = vm_handle(vcp->segid, (int32long64_t) 0);
#else
	vcp->vmh = SRVAL(vcp->segid, 0, 0);
#endif
    }
    vcp->v.v_gnode->gn_seg = vcp->segid;	/* XXX Important XXX */
    /* Track how many read-only vs writable mappings exist. */
    if (flag & SHM_RDONLY) {
	vp->v_gnode->gn_mrdcnt++;
    } else {
	vp->v_gnode->gn_mwrcnt++;
    }
    /*
     * We keep the caller's credentials since an async daemon will handle the
     * request at some point. We assume that the same credentials will be used.
     */
    if (!vcp->credp || (vcp->credp != cred)) {
	crhold(cred);
	if (vcp->credp) {
	    struct ucred *crp = vcp->credp;
	    vcp->credp = NULL;
	    crfree(crp);
	}
	vcp->credp = cred;
    }
    ReleaseWriteLock(&vcp->lock);
    VN_HOLD(vp);
    afs_Trace4(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, vp,
	       ICL_TYPE_LONG, addr, ICL_TYPE_LONG, len, ICL_TYPE_LONG, off);
    return (0);
}
484
485
486 int
487 afs_gn_unmap(struct vnode *vp,
488 int32long64_t flag,
489 struct ucred *cred)
490 {
491 struct vcache *vcp = VTOAFS(vp);
492 AFS_STATCNT(afs_gn_unmap);
493 ObtainWriteLock(&vcp->lock, 402);
494 if (flag & SHM_RDONLY) {
495 vp->v_gnode->gn_mrdcnt--;
496 if (vp->v_gnode->gn_mrdcnt <= 0)
497 vp->v_gnode->gn_mrdcnt = 0;
498 } else {
499 vp->v_gnode->gn_mwrcnt--;
500 if (vp->v_gnode->gn_mwrcnt <= 0)
501 vp->v_gnode->gn_mwrcnt = 0;
502 }
503 ReleaseWriteLock(&vcp->lock);
504
505 AFS_RELE(vp);
506 return 0;
507 }
508
509
/*
 * VOP_ACCESS for the AFS gnode layer.
 *
 * First asks AFS itself (afs_access) whether `mode' (an rwx bitmask,
 * 0-7) is permitted.  Then, for the extended AIX `who' classes, also
 * checks the file's Unix mode bits from getattr:
 *   ACC_OTHERS/ACC_ANY - group bits must grant mode (ACC_ANY also
 *                        accepts the owner bits granting it);
 *   ACC_ALL            - owner, group, AND other bits must all grant it.
 * Returns 0 if allowed, EINVAL for a bad mode, EACCES otherwise.
 */
int
afs_gn_access(struct vnode *vp,
	      int32long64_t Mode,
	      int32long64_t Who,
	      struct ucred *cred)
{
    int error;
    struct vattr vattr;
    int mode = Mode;
    int who = Who;

    AFS_STATCNT(afs_gn_access);
    /* Only the low three (rwx) bits are meaningful. */
    if (mode & ~0x7) {
	error = EINVAL;
	goto out;
    }

    error = afs_access(VTOAFS(vp), mode, cred);
    if (!error) {
	/* Additional testing */
	if (who == ACC_OTHERS || who == ACC_ANY) {
	    error = afs_getattr(VTOAFS(vp), &vattr, cred);
	    if (!error) {
		if (who == ACC_ANY) {
		    /* ACC_ANY: owner bits granting access suffice. */
		    if (((vattr.va_mode >> 6) & mode) == mode) {
			error = 0;
			goto out;
		    }
		}
		/* Otherwise the group bits must grant every requested bit. */
		if (((vattr.va_mode >> 3) & mode) == mode)
		    error = 0;
		else
		    error = EACCES;
	    }
	} else if (who == ACC_ALL) {
	    error = afs_getattr(VTOAFS(vp), &vattr, cred);
	    if (!error) {
		/* ACC_ALL: owner, group, and other must each grant mode. */
		if ((!((vattr.va_mode >> 6) & mode))
		    || (!((vattr.va_mode >> 3) & mode))
		    || (!(vattr.va_mode & mode)))
		    error = EACCES;
		else
		    error = 0;
	    }
	}

    }
  out:
    afs_Trace3(afs_iclSetp, CM_TRACE_GACCESS, ICL_TYPE_POINTER, vp,
	       ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
    return (error);
}
562
563
564 int
565 afs_gn_getattr(struct vnode *vp,
566 struct vattr *vattrp,
567 struct ucred *cred)
568 {
569 int error;
570
571 AFS_STATCNT(afs_gn_getattr);
572 error = afs_getattr(VTOAFS(vp), vattrp, cred);
573 afs_Trace2(afs_iclSetp, CM_TRACE_GGETATTR, ICL_TYPE_POINTER, vp,
574 ICL_TYPE_LONG, error);
575 return (error);
576 }
577
578
/*
 * VOP_SETATTR for the AFS gnode layer.  AIX passes an opcode plus up to
 * three generic arguments; translate into a struct vattr and hand to
 * afs_setattr():
 *   V_MODE  - arg1 is the new file mode;
 *   V_OWN   - arg2/arg3 are the new uid/gid unless masked out by the
 *             T_OWNER_AS_IS / T_GROUP_AS_IS bits in arg1;
 *   V_UTIME - T_SETTIME in arg1 means "set both times to now" (kernel
 *             global `time'); otherwise arg2/arg3 point at timestruc_t
 *             values for atime/mtime.
 * Any other opcode yields EINVAL.
 */
int
afs_gn_setattr(struct vnode *vp,
	       int32long64_t op,
	       int32long64_t arg1,
	       int32long64_t arg2,
	       int32long64_t arg3,
	       struct ucred *cred)
{
    struct vattr va;
    int error = 0;

    AFS_STATCNT(afs_gn_setattr);
    VATTR_NULL(&va);
    switch (op) {
	/* change mode */
    case V_MODE:
	va.va_mode = arg1;
	break;
    case V_OWN:
	if ((arg1 & T_OWNER_AS_IS) == 0)
	    va.va_uid = arg2;
	if ((arg1 & T_GROUP_AS_IS) == 0)
	    va.va_gid = arg3;
	break;
    case V_UTIME:
#ifdef notdef
	error = afs_access(vp, VWRITE, cred);
	if (error)
	    goto out;
#endif
	if (arg1 & T_SETTIME) {
	    /* `time' is the AIX kernel's current-time global. */
	    va.va_atime.tv_sec = time;
	    va.va_mtime.tv_sec = time;
	} else {
	    /* arg2/arg3 carry pointers despite their integer type. */
	    va.va_atime = *(struct timestruc_t *)arg2;
	    va.va_mtime = *(struct timestruc_t *)arg3;
	}
	break;
    default:
	error = EINVAL;
	goto out;
    }

    error = afs_setattr(VTOAFS(vp), &va, cred);
  out:
    afs_Trace2(afs_iclSetp, CM_TRACE_GSETATTR, ICL_TYPE_POINTER, vp,
	       ICL_TYPE_LONG, error);
    return (error);
}
628
629
/* One page of zeroes used to overwrite ranges in afs_gn_fclear(). */
char zero_buffer[PAGESIZE];

/*
 * VOP_FCLEAR for the AFS gnode layer: zero the byte range
 * [offset, offset+length) of `vp'.
 *
 * Flushes any VM-cached pages for the file first, then writes the
 * range one PAGESIZE chunk at a time from zero_buffer via afs_rdwr().
 * Fails with EFBIG if the range would exceed the process ulimit.
 */
int
afs_gn_fclear(struct vnode *vp,
	      int32long64_t flags,
	      offset_t offset,
	      offset_t length,
	      caddr_t vinfo,
	      struct ucred *cred)
{
    int i, len, error = 0;
    struct iovec iov;
    struct uio uio;
    static int fclear_init = 0;	/* lazily zero zero_buffer exactly once */
    struct vcache *avc = VTOAFS(vp);

    memset(&uio, 0, sizeof(uio));
    memset(&iov, 0, sizeof(iov));

    AFS_STATCNT(afs_gn_fclear);
    if (!fclear_init) {
	memset(zero_buffer, 0, PAGESIZE);
	fclear_init = 1;
    }
    /*
     * Don't clear past ulimit
     */
    if (offset + length > get_ulimit())
	return (EFBIG);

    /* Flush all pages first */
    if (avc->segid) {
	AFS_GUNLOCK();
	vm_flushp(avc->segid, 0, MAXFSIZE / PAGESIZE - 1);
	vms_iowait(avc->segid);
	AFS_GLOCK();
    }
    uio.afsio_offset = offset;
    /* afs_rdwr advances afsio_offset; use it as the loop cursor. */
    for (i = offset; i < offset + length; i = uio.afsio_offset) {
	len = offset + length - i;
	iov.iov_len = (len > PAGESIZE) ? PAGESIZE : len;
	iov.iov_base = zero_buffer;
	uio.afsio_iov = &iov;
	uio.afsio_iovcnt = 1;
	uio.afsio_seg = AFS_UIOSYS;
	uio.afsio_resid = iov.iov_len;
	if (error = afs_rdwr(VTOAFS(vp), &uio, UIO_WRITE, 0, cred))
	    break;
    }
    afs_Trace4(afs_iclSetp, CM_TRACE_GFCLEAR, ICL_TYPE_POINTER, vp,
	       ICL_TYPE_LONG, offset, ICL_TYPE_LONG, length, ICL_TYPE_LONG,
	       error);
    return (error);
}
683
684
685 int
686 afs_gn_fsync(struct vnode *vp,
687 int32long64_t flags, /* Not used by AFS */
688 int32long64_t vinfo, /* Not used by AFS */
689 struct ucred *cred)
690 {
691 int error;
692
693 AFS_STATCNT(afs_gn_fsync);
694 error = afs_fsync(vp, cred);
695 afs_Trace3(afs_iclSetp, CM_TRACE_GFSYNC, ICL_TYPE_POINTER, vp,
696 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
697 return (error);
698 }
699
700
701 int
702 afs_gn_ftrunc(struct vnode *vp,
703 int32long64_t flags,
704 offset_t length,
705 caddr_t vinfo,
706 struct ucred *cred)
707 {
708 struct vattr va;
709 int error;
710
711 AFS_STATCNT(afs_gn_ftrunc);
712 VATTR_NULL(&va);
713 va.va_size = length;
714 error = afs_setattr(VTOAFS(vp), &va, cred);
715 afs_Trace4(afs_iclSetp, CM_TRACE_GFTRUNC, ICL_TYPE_POINTER, vp,
716 ICL_TYPE_LONG, flags, ICL_TYPE_OFFSET,
717 ICL_HANDLE_OFFSET(length), ICL_TYPE_LONG, error);
718 return (error);
719 }
720
/* Min size of a file which is dumping core before we declare it a page hog. */
#define MIN_PAGE_HOG_SIZE 8388608

/*
 * VOP_RDWR for the AFS gnode layer: read or write `vp' per `ubuf'.
 *
 * Responsibilities beyond delegating to afs_vm_rdwr():
 *  - short-circuit on a pending vcache store error (returned for writes,
 *    EIO for reads);
 *  - stash the caller's credentials on the vcache for async daemons,
 *    duplicating them first for NFS-translator requests so the NFS
 *    server can't overwrite them underneath us;
 *  - for NFS-translator requests, drop FSYNC and enforce read access;
 *  - bracket writes with afs_FakeOpen()/afs_FakeClose() because NFS
 *    issues no open/close RPCs to maintain the writer counts.
 * Optionally returns fresh attributes through *vattrp on success.
 */
int
afs_gn_rdwr(struct vnode *vp,
	    enum uio_rw op,
	    int32long64_t Flags,
	    struct uio *ubuf,
	    ext_t ext,		/* Ignored in AFS */
	    caddr_t vinfo,	/* Ignored in AFS */
	    struct vattr *vattrp,
	    struct ucred *cred)
{
    struct vcache *vcp = VTOAFS(vp);
    struct vrequest treq;
    int error = 0;
    int free_cred = 0;		/* set when we crdup()ed cred and must crfree it */
    int flags = Flags;

    AFS_STATCNT(afs_gn_rdwr);

    if (vcp->vc_error) {
	/* A previous store failed; surface it on write, EIO on read. */
	if (op == UIO_WRITE) {
	    afs_Trace2(afs_iclSetp, CM_TRACE_GRDWR1, ICL_TYPE_POINTER, vp,
		       ICL_TYPE_LONG, vcp->vc_error);
	    return vcp->vc_error;
	} else
	    return EIO;
    }

    ObtainSharedLock(&vcp->lock, 507);
    /*
     * We keep the caller's credentials since an async daemon will handle the
     * request at some point. We assume that the same credentials will be used.
     * If this is being called from an NFS server thread, then dupe the
     * cred and only use that copy in calls and for the stash.
     */
    if (!vcp->credp || (vcp->credp != cred)) {
#ifdef AFS_AIX_IAUTH_ENV
	if (AFS_NFSXLATORREQ(cred)) {
	    /* Must be able to use cred later, so dupe it so that nfs server
	     * doesn't overwrite it's contents.
	     */
	    cred = crdup(cred);
	    free_cred = 1;
	}
#endif
	crhold(cred);		/* Bump refcount for reference in vcache */

	if (vcp->credp) {
	    /* Swap out the old stashed cred under a write lock. */
	    struct ucred *crp;
	    UpgradeSToWLock(&vcp->lock, 508);
	    crp = vcp->credp;
	    vcp->credp = NULL;
	    ConvertWToSLock(&vcp->lock);
	    crfree(crp);
	}
	vcp->credp = cred;
    }
    ReleaseSharedLock(&vcp->lock);

    /*
     * XXX Is the following really required?? XXX
     */
    if (error = afs_InitReq(&treq, cred))
	return error;
    if (error = afs_VerifyVCache(vcp, &treq))
	return afs_CheckCode(error, &treq, 50);
    osi_FlushPages(vcp, cred);	/* Flush old pages */

    if (AFS_NFSXLATORREQ(cred)) {
	if (flags & FSYNC)
	    flags &= ~FSYNC;
	if (op == UIO_READ) {
	    if (!afs_AccessOK
		(vcp, PRSFS_READ, &treq,
		 CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ)) {
		if (free_cred)
		    crfree(cred);
		return EACCES;
	    }
	}
    }

    /*
     * We have to bump the open/exwriters field here courtesy of the nfs xlator
     * because there're no open/close nfs rpcs to call our afs_open/close.
     * We do a similar thing on the afs_read/write interface.
     */
    if (op == UIO_WRITE) {
#ifdef AFS_64BIT_CLIENT
	if (ubuf->afsio_offset < afs_vmMappingEnd) {
#endif /* AFS_64BIT_CLIENT */
	    ObtainWriteLock(&vcp->lock, 240);
	    vcp->f.states |= CDirty;	/* Set the dirty bit */
	    afs_FakeOpen(vcp);
	    ReleaseWriteLock(&vcp->lock);
#ifdef AFS_64BIT_CLIENT
	}
#endif /* AFS_64BIT_CLIENT */
    }

    error = afs_vm_rdwr(vp, ubuf, op, flags, cred);

    if (op == UIO_WRITE) {
#ifdef AFS_64BIT_CLIENT
	if (ubuf->afsio_offset < afs_vmMappingEnd) {
#endif /* AFS_64BIT_CLIENT */
	    ObtainWriteLock(&vcp->lock, 241);
	    afs_FakeClose(vcp, cred);	/* XXXX For nfs trans and cores XXXX */
	    ReleaseWriteLock(&vcp->lock);
#ifdef AFS_64BIT_CLIENT
	}
#endif /* AFS_64BIT_CLIENT */
    }
    if (vattrp != NULL && error == 0)
	afs_gn_getattr(vp, vattrp, cred);

    afs_Trace4(afs_iclSetp, CM_TRACE_GRDWR, ICL_TYPE_POINTER, vp,
	       ICL_TYPE_LONG, flags, ICL_TYPE_LONG, op, ICL_TYPE_LONG, error);

    if (free_cred)
	crfree(cred);
    return (error);
}
846
/* Max chunks written through the VM segment before forcing vms_iowait(). */
#define AFS_MAX_VM_CHUNKS 10

/*
 * Core paged read/write engine for afs_gn_rdwr().
 *
 * Transfers go through the file's VM segment via vm_move(); the segment
 * is created lazily (as in afs_gn_map).  On 64-bit clients, the portion
 * of a transfer beyond afs_vmMappingEnd bypasses VM and goes through
 * afs_direct_rdwr() instead; a request straddling the boundary is split
 * (the `mixed' case) and the final uio offset/resid are fixed up at the
 * end.  Writes are performed one chunk at a time with periodic
 * vm_writep()/vms_iowait() flushes and afs_DoPartialWrite() to bound
 * dirty cache data; reads trigger prefetch of the next chunk at chunk
 * boundaries.  A pending vcache store error (quota/space) causes the
 * whole write to be reported as failed so the user sees it.
 */
static int
afs_vm_rdwr(struct vnode *vp,
	    struct uio *uiop,
	    enum uio_rw rw,
	    int ioflag,
	    struct ucred *credp)
{
    afs_int32 code = 0;
    int i;
    afs_int32 blockSize;
    afs_size_t fileSize, xfrOffset, offset, old_offset, xfrSize;
    vmsize_t txfrSize;
#ifdef AFS_64BIT_CLIENT
    afs_size_t finalOffset;
    off_t toffset;
    int mixed = 0;		/* transfer straddles afs_vmMappingEnd */
    afs_size_t add2resid = 0;	/* resid to restore for bytes past EOF */
#endif /* AFS_64BIT_CLIENT */
    struct vcache *vcp = VTOAFS(vp);
    struct dcache *tdc;
    afs_size_t start_offset;
    afs_int32 save_resid = uiop->afsio_resid;
    int first_page, last_page, pages;
    int count, len;
    int counter = 0;
    struct vrequest treq;

    if (code = afs_InitReq(&treq, credp))
	return code;

    /* special case easy transfer; apparently a lot are done */
    if ((xfrSize = uiop->afsio_resid) == 0)
	return 0;

    ObtainReadLock(&vcp->lock);
    fileSize = vcp->f.m.Length;
    if (rw == UIO_WRITE && (ioflag & IO_APPEND)) {	/* handle IO_APPEND mode */
	uiop->afsio_offset = fileSize;
    }
    /* compute xfrOffset now, and do some checks */
    xfrOffset = uiop->afsio_offset;
    if (xfrOffset < 0 || xfrOffset + xfrSize < 0) {
	code = EINVAL;
	ReleaseReadLock(&vcp->lock);
	goto fail;
    }
#ifndef AFS_64BIT_CLIENT
    /* check for "file too big" error, which should really be done above us */
    if (rw == UIO_WRITE && xfrSize + fileSize > get_ulimit()) {
	code = EFBIG;
	ReleaseReadLock(&vcp->lock);
	goto fail;
    }
#endif /* AFS_64BIT_CLIENT */

#ifdef AFS_64BIT_CLIENT
    if (xfrOffset + xfrSize > afs_vmMappingEnd) {
	if (rw == UIO_READ) {
	    /* don't read past EOF */
	    if (xfrSize+xfrOffset > fileSize) {
		add2resid = xfrSize + xfrOffset - fileSize;
		xfrSize = fileSize - xfrOffset;
		if (xfrSize <= 0) {
		    ReleaseReadLock(&vcp->lock);
		    goto fail;
		}
		txfrSize = xfrSize;
		afsio_trim(uiop, txfrSize);
	    }
	}
	if (xfrOffset < afs_vmMappingEnd) {
	    /* special case of a buffer crossing the VM mapping line */
	    struct uio tuio;
	    struct iovec tvec[16];	/* Should have access to #define */
	    afs_int32 tsize;

	    memset(&tuio, 0, sizeof(tuio));
	    memset(&tvec, 0, sizeof(tvec));

	    /* Do the tail (beyond the mapping line) directly first,
	     * then fall through to the VM path for the head. */
	    mixed = 1;
	    finalOffset = xfrOffset + xfrSize;
	    tsize = (afs_size_t) (xfrOffset + xfrSize - afs_vmMappingEnd);
	    txfrSize = xfrSize;
	    afsio_copy(uiop, &tuio, tvec);
	    afsio_skip(&tuio, txfrSize - tsize);
	    afsio_trim(&tuio, tsize);
	    tuio.afsio_offset = afs_vmMappingEnd;
	    ReleaseReadLock(&vcp->lock);
	    ObtainWriteLock(&vcp->lock, 243);
	    afs_FakeClose(vcp, credp);	/* XXXX For nfs trans and cores XXXX */
	    ReleaseWriteLock(&vcp->lock);
	    code = afs_direct_rdwr(vp, &tuio, rw, ioflag, credp);
	    ObtainWriteLock(&vcp->lock, 244);
	    afs_FakeOpen(vcp);	/* XXXX For nfs trans and cores XXXX */
	    ReleaseWriteLock(&vcp->lock);
	    if (code)
		goto fail;
	    ObtainReadLock(&vcp->lock);
	    xfrSize = afs_vmMappingEnd - xfrOffset;
	    txfrSize = xfrSize;
	    afsio_trim(uiop, txfrSize);
	} else {
	    /* Entirely beyond the mapping line: pure direct I/O. */
	    ReleaseReadLock(&vcp->lock);
	    code = afs_direct_rdwr(vp, uiop, rw, ioflag, credp);
	    uiop->uio_resid += add2resid;
	    return code;
	}
    }
#endif /* AFS_64BIT_CLIENT */

    if (!vcp->segid) {
	/* Lazily create the file's VM segment (same as afs_gn_map). */
	afs_uint32 tlen = vcp->f.m.Length;
#ifdef AFS_64BIT_CLIENT
	if (vcp->f.m.Length > afs_vmMappingEnd)
	    tlen = afs_vmMappingEnd;
#endif
	/* Consider V_INTRSEG too for interrupts */
	if (code =
	    vms_create(&vcp->segid, V_CLIENT, (dev_t) vcp->v.v_gnode, tlen, 0, 0)) {
	    ReleaseReadLock(&vcp->lock);
	    goto fail;
	}
#ifdef AFS_64BIT_KERNEL
	vcp->vmh = vm_handle(vcp->segid, (int32long64_t) 0);
#else
	vcp->vmh = SRVAL(vcp->segid, 0, 0);
#endif
    }
    vcp->v.v_gnode->gn_seg = vcp->segid;
    if (rw == UIO_READ) {
	ReleaseReadLock(&vcp->lock);
	/* don't read past EOF */
	if (xfrSize + xfrOffset > fileSize)
	    xfrSize = fileSize - xfrOffset;
	if (xfrSize <= 0)
	    goto fail;
#ifdef AFS_64BIT_CLIENT
	toffset = xfrOffset;
	uiop->afsio_offset = xfrOffset;
	afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE, ICL_TYPE_POINTER, vcp,
		   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrOffset),
		   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrSize));
	AFS_GUNLOCK();
	txfrSize = xfrSize;
	code = vm_move(vcp->segid, toffset, txfrSize, rw, uiop);
#else /* AFS_64BIT_CLIENT */
	AFS_GUNLOCK();
	code = vm_move(vcp->segid, xfrOffset, xfrSize, rw, uiop);
#endif /* AFS_64BIT_CLIENT */
	AFS_GLOCK();
	/*
	 * If at a chunk boundary and staying within chunk,
	 * start prefetch of next chunk.
	 */
	/* NOTE(review): `||' binds looser than `&&', so counter == 0 alone
	 * triggers this branch regardless of the boundary test — presumably
	 * intended as (counter == 0 || boundary) && within-chunk; confirm
	 * against upstream before changing. */
	if (counter == 0 || AFS_CHUNKOFFSET(xfrOffset) == 0
	    && xfrSize <= AFS_CHUNKSIZE(xfrOffset)) {
	    ObtainWriteLock(&vcp->lock, 407);
	    tdc = afs_FindDCache(vcp, xfrOffset);
	    if (tdc) {
		if (!(tdc->mflags & DFNextStarted))
		    afs_PrefetchChunk(vcp, tdc, credp, &treq);
		afs_PutDCache(tdc);
	    }
	    ReleaseWriteLock(&vcp->lock);
	}
#ifdef AFS_64BIT_CLIENT
	if (mixed) {
	    uiop->afsio_offset = finalOffset;
	}
	uiop->uio_resid += add2resid;
#endif /* AFS_64BIT_CLIENT */
	return code;
    }

    /* UIO_WRITE */
    start_offset = uiop->afsio_offset;
    afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE, ICL_TYPE_POINTER, vcp,
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(start_offset),
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrSize));
    ReleaseReadLock(&vcp->lock);
    ObtainWriteLock(&vcp->lock, 400);
    vcp->f.m.Date = osi_Time();	/* Set file date (for ranlib) */
    /* extend file */
    /* un-protect last page. */
    last_page = vcp->f.m.Length / PAGESIZE;
#ifdef AFS_64BIT_CLIENT
    if (vcp->f.m.Length > afs_vmMappingEnd)
	last_page = afs_vmMappingEnd / PAGESIZE;
#endif
    vm_protectp(vcp->segid, last_page, 1, FILEKEY);
    if (xfrSize + xfrOffset > fileSize) {
	vcp->f.m.Length = xfrSize + xfrOffset;
    }
    if ((!(vcp->f.states & CPageHog)) && (xfrSize >= MIN_PAGE_HOG_SIZE)) {
	vmPageHog++;
	vcp->f.states |= CPageHog;
    }
    ReleaseWriteLock(&vcp->lock);

    /* If the write will fit into a single chunk we'll write all of it
     * at once. Otherwise, we'll write one chunk at a time, flushing
     * some of it to disk.
     */
    count = 0;

    /* Only create a page to avoid excess VM access if we're writing a
     * small file which is either new or completely overwrites the
     * existing file.
     */
    if ((xfrOffset == 0) && (xfrSize < PAGESIZE) && (xfrSize >= fileSize)
	&& (vcp->v.v_gnode->gn_mwrcnt == 0)
	&& (vcp->v.v_gnode->gn_mrdcnt == 0)) {
	(void)vm_makep(vcp->segid, 0);
    }

    while (xfrSize > 0) {
	offset = AFS_CHUNKBASE(xfrOffset);
	len = xfrSize;

	if (AFS_CHUNKSIZE(xfrOffset) <= len)
	    len =
		(afs_size_t) AFS_CHUNKSIZE(xfrOffset) - (xfrOffset - offset);

	if (len == xfrSize) {
	    /* All data goes to this one chunk. */
	    AFS_GUNLOCK();
	    old_offset = uiop->afsio_offset;
#ifdef AFS_64BIT_CLIENT
	    uiop->afsio_offset = xfrOffset;
	    toffset = xfrOffset;
	    txfrSize = xfrSize;
	    code = vm_move(vcp->segid, toffset, txfrSize, rw, uiop);
#else /* AFS_64BIT_CLIENT */
	    code = vm_move(vcp->segid, xfrOffset, xfrSize, rw, uiop);
#endif /* AFS_64BIT_CLIENT */
	    AFS_GLOCK();
	    if (code) {
		goto fail;
	    }
	    xfrOffset += len;
	    xfrSize = 0;
	} else {
	    /* Write just one chunk's worth of data. */
	    struct uio tuio;
	    struct iovec tvec[16];	/* Should have access to #define */

	    memset(&tuio, 0, sizeof(tuio));
	    memset(&tvec, 0, sizeof(tvec));

	    /* Purge dirty chunks of file if there are too many dirty chunks.
	     * Inside the write loop, we only do this at a chunk boundary.
	     * Clean up partial chunk if necessary at end of loop.
	     */
	    if (counter > 0 && code == 0 && xfrOffset == offset) {
		ObtainWriteLock(&vcp->lock, 403);
		if (xfrOffset > vcp->f.m.Length)
		    vcp->f.m.Length = xfrOffset;
		code = afs_DoPartialWrite(vcp, &treq);
		vcp->f.states |= CDirty;
		ReleaseWriteLock(&vcp->lock);
		if (code) {
		    goto fail;
		}
	    }
	    counter++;

	    afsio_copy(uiop, &tuio, tvec);
	    afsio_trim(&tuio, len);
	    tuio.afsio_offset = xfrOffset;

	    AFS_GUNLOCK();
	    old_offset = uiop->afsio_offset;
#ifdef AFS_64BIT_CLIENT
	    toffset = xfrOffset;
	    code = vm_move(vcp->segid, toffset, len, rw, &tuio);
#else /* AFS_64BIT_CLIENT */
	    code = vm_move(vcp->segid, xfrOffset, len, rw, &tuio);
#endif /* AFS_64BIT_CLIENT */
	    AFS_GLOCK();
	    len -= tuio.afsio_resid;
	    if (code || (len <= 0)) {
		code = code ? code : EINVAL;
		goto fail;
	    }
	    afsio_skip(uiop, len);
	    xfrSize -= len;
	    xfrOffset += len;
	}

	/* Schedule the pages just written for output to the cache. */
	first_page = (afs_size_t) old_offset >> PGSHIFT;
	pages =
	    1 + (((afs_size_t) old_offset + (len - 1)) >> PGSHIFT) -
	    first_page;
	afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE2, ICL_TYPE_POINTER, vcp,
		   ICL_TYPE_INT32, first_page, ICL_TYPE_INT32, pages);
	AFS_GUNLOCK();
	code = vm_writep(vcp->segid, first_page, pages);
	if (code) {
	    AFS_GLOCK();
	    goto fail;
	}
	if (++count > AFS_MAX_VM_CHUNKS) {
	    /* Too many chunks in flight: wait for segment I/O to drain. */
	    count = 0;
	    code = vms_iowait(vcp->segid);
	    if (code) {
		/* cache device failure? */
		AFS_GLOCK();
		goto fail;
	    }
	}
	AFS_GLOCK();

    }

    if (count) {
	AFS_GUNLOCK();
	code = vms_iowait(vcp->segid);
	AFS_GLOCK();
	if (code) {
	    /* cache device failure? */
	    goto fail;
	}
    }

    ObtainWriteLock(&vcp->lock, 242);
    if (code == 0 && (vcp->f.states & CDirty)) {
	code = afs_DoPartialWrite(vcp, &treq);
    }
    vm_protectp(vcp->segid, last_page, 1, RDONLY);
    ReleaseWriteLock(&vcp->lock);

    /* If requested, fsync the file after every write */
    if (ioflag & FSYNC)
	afs_fsync(vp, credp);

    ObtainReadLock(&vcp->lock);
    if (vcp->vc_error) {
	/* Pretend we didn't write anything. We need to get the error back to
	 * the user. If we don't it's possible for a quota error for this
	 * write to succeed and the file to be closed without the user ever
	 * having seen the store error. And AIX syscall clears the error if
	 * anything was written.
	 */
	code = vcp->vc_error;
	if (code == EDQUOT || code == ENOSPC)
	    uiop->afsio_resid = save_resid;
    }
#ifdef AFS_64BIT_CLIENT
    if (mixed) {
	uiop->afsio_offset = finalOffset;
    }
#endif /* AFS_64BIT_CLIENT */
    ReleaseReadLock(&vcp->lock);

  fail:
    afs_Trace2(afs_iclSetp, CM_TRACE_VMWRITE3, ICL_TYPE_POINTER, vcp,
	       ICL_TYPE_INT32, code);
    return code;
}
1207
1208
1209 static int
1210 afs_direct_rdwr(struct vnode *vp,
1211 struct uio *uiop,
1212 enum uio_rw rw,
1213 int ioflag,
1214 struct ucred *credp)
1215 {
1216 afs_int32 code = 0;
1217 afs_size_t fileSize, xfrOffset, offset, old_offset, xfrSize;
1218 struct vcache *vcp = VTOAFS(vp);
1219 afs_int32 save_resid = uiop->afsio_resid;
1220 struct vrequest treq;
1221
1222 if (code = afs_InitReq(&treq, credp))
1223 return code;
1224
1225 /* special case easy transfer; apparently a lot are done */
1226 if ((xfrSize = uiop->afsio_resid) == 0)
1227 return 0;
1228
1229 ObtainReadLock(&vcp->lock);
1230 fileSize = vcp->f.m.Length;
1231 if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
1232 uiop->afsio_offset = fileSize;
1233 }
1234 /* compute xfrOffset now, and do some checks */
1235 xfrOffset = uiop->afsio_offset;
1236 if (xfrOffset < 0 || xfrOffset + xfrSize < 0) {
1237 code = EINVAL;
1238 ReleaseReadLock(&vcp->lock);
1239 goto fail;
1240 }
1241
1242 /* check for "file too big" error, which should really be done above us */
1243 #ifdef notdef
1244 if (rw == UIO_WRITE && xfrSize + fileSize > get_ulimit()) {
1245 code = EFBIG;
1246 ReleaseReadLock(&vcp->lock);
1247 goto fail;
1248 }
1249 #endif
1250 ReleaseReadLock(&vcp->lock);
1251 if (rw == UIO_WRITE) {
1252 ObtainWriteLock(&vcp->lock, 400);
1253 vcp->f.m.Date = osi_Time(); /* Set file date (for ranlib) */
1254 /* extend file */
1255 if (xfrSize + xfrOffset > fileSize)
1256 vcp->f.m.Length = xfrSize + xfrOffset;
1257 ReleaseWriteLock(&vcp->lock);
1258 }
1259 afs_Trace3(afs_iclSetp, CM_TRACE_DIRECTRDWR, ICL_TYPE_POINTER, vp,
1260 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(uiop->afsio_offset),
1261 ICL_TYPE_LONG, uiop->afsio_resid);
1262 code = afs_rdwr(VTOAFS(vp), uiop, rw, ioflag, credp);
1263 if (code != 0) {
1264 uiop->afsio_resid = save_resid;
1265 } else {
1266 uiop->afsio_offset = xfrOffset + xfrSize;
1267 if (uiop->afsio_resid > 0) {
1268 /* should zero here the remaining buffer */
1269 uiop->afsio_resid = 0;
1270 }
1271 /* Purge dirty chunks of file if there are too many dirty chunks.
1272 * Inside the write loop, we only do this at a chunk boundary.
1273 * Clean up partial chunk if necessary at end of loop.
1274 */
1275 if (AFS_CHUNKBASE(uiop->afsio_offset) != AFS_CHUNKBASE(xfrOffset)) {
1276 ObtainWriteLock(&vcp->lock, 402);
1277 code = afs_DoPartialWrite(vcp, &treq);
1278 vcp->f.states |= CDirty;
1279 ReleaseWriteLock(&vcp->lock);
1280 }
1281 }
1282
1283 fail:
1284 return code;
1285 }
1286
1287
1288 static int
1289 lock_normalize(struct vnode *vp,
1290 struct flock *lckdat,
1291 offset_t offset,
1292 struct ucred *cred)
1293 {
1294 struct vattr vattr;
1295 int code;
1296
1297 switch (lckdat->l_whence) {
1298 case 0:
1299 return 0;
1300 case 1:
1301 lckdat->l_start += (off_t) offset;
1302 break;
1303 case 2:
1304 code = afs_getattr(VTOAFS(vp), &vattr, cred);
1305 if (code != 0)
1306 return code;
1307 lckdat->l_start += (off_t) vattr.va_size;
1308 break;
1309 default:
1310 return EINVAL;
1311 }
1312 lckdat->l_whence = 0;
1313 return 0;
1314 }
1315
1316
1317
1318 int
1319 afs_gn_lockctl(struct vnode *vp,
1320 offset_t offset,
1321 struct eflock *lckdat,
1322 int32long64_t cmd,
1323 int (*ignored_fcn) (),
1324 #ifdef AFS_AIX52_ENV /* Changed in AIX 5.2 and up */
1325 ulong * ignored_id,
1326 #else /* AFS_AIX52_ENV */
1327 ulong32int64_t * ignored_id,
1328 #endif /* AFS_AIX52_ENV */
1329 struct ucred *cred)
1330 {
1331 int error, ncmd = 0;
1332 struct flock flkd;
1333 struct vattr *attrs;
1334
1335 AFS_STATCNT(afs_gn_lockctl);
1336 /* Convert from AIX's cmd to standard lockctl lock types... */
1337 if (cmd == 0)
1338 ncmd = F_GETLK;
1339 else if (cmd & SETFLCK) {
1340 ncmd = F_SETLK;
1341 if (cmd & SLPFLCK)
1342 ncmd = F_SETLKW;
1343 }
1344 flkd.l_type = lckdat->l_type;
1345 flkd.l_whence = lckdat->l_whence;
1346 flkd.l_start = lckdat->l_start;
1347 flkd.l_len = lckdat->l_len;
1348 flkd.l_pid = lckdat->l_pid;
1349 flkd.l_sysid = lckdat->l_sysid;
1350
1351 if (flkd.l_start != lckdat->l_start || flkd.l_len != lckdat->l_len)
1352 return EINVAL;
1353 if (error = lock_normalize(vp, &flkd, offset, cred))
1354 return (error);
1355 error = afs_lockctl(vp, &flkd, ncmd, cred);
1356 lckdat->l_type = flkd.l_type;
1357 lckdat->l_whence = flkd.l_whence;
1358 lckdat->l_start = flkd.l_start;
1359 lckdat->l_len = flkd.l_len;
1360 lckdat->l_pid = flkd.l_pid;
1361 lckdat->l_sysid = flkd.l_sysid;
1362 afs_Trace3(afs_iclSetp, CM_TRACE_GLOCKCTL, ICL_TYPE_POINTER, vp,
1363 ICL_TYPE_LONG, ncmd, ICL_TYPE_LONG, error);
1364 return (error);
1365 }
1366
1367
1368 /* NOTE: In the nfs glue routine (nfs_gn2sun.c) the order was wrong (vp, flags, cmd, arg, ext); was that another typo? */
1369 int
1370 afs_gn_ioctl(struct vnode *vp,
1371 int32long64_t Cmd,
1372 caddr_t arg,
1373 size_t flags, /* Ignored in AFS */
1374 ext_t ext, /* Ignored in AFS */
1375 struct ucred *crp) /* Ignored in AFS */
1376 {
1377 int error;
1378 int cmd = Cmd;
1379
1380 AFS_STATCNT(afs_gn_ioctl);
1381 /* This seems to be a perfect fit for our ioctl redirection (afs_xioctl hack); thus the ioctl(2) entry in sysent.c is unaffected in the aix/afs port. */
1382 error = afs_ioctl(vp, cmd, arg);
1383 afs_Trace3(afs_iclSetp, CM_TRACE_GIOCTL, ICL_TYPE_POINTER, vp,
1384 ICL_TYPE_LONG, cmd, ICL_TYPE_LONG, error);
1385 return (error);
1386 }
1387
1388
1389 int
1390 afs_gn_readlink(struct vnode *vp,
1391 struct uio *uiop,
1392 struct ucred *cred)
1393 {
1394 int error;
1395
1396 AFS_STATCNT(afs_gn_readlink);
1397 error = afs_readlink(vp, uiop, cred);
1398 afs_Trace2(afs_iclSetp, CM_TRACE_GREADLINK, ICL_TYPE_POINTER, vp,
1399 ICL_TYPE_LONG, error);
1400 return (error);
1401 }
1402
1403
1404 int
1405 afs_gn_select(struct vnode *vp,
1406 int32long64_t correl,
1407 ushort e,
1408 ushort *re,
1409 void (* notify)(),
1410 caddr_t vinfo,
1411 struct ucred *crp)
1412 {
1413 AFS_STATCNT(afs_gn_select);
1414 /* NO SUPPORT for this in afs YET! */
1415 return (EOPNOTSUPP);
1416 }
1417
1418
1419 int
1420 afs_gn_symlink(struct vnode *vp,
1421 char *link,
1422 char *target,
1423 struct ucred *cred)
1424 {
1425 struct vattr va;
1426 int error;
1427
1428 AFS_STATCNT(afs_gn_symlink);
1429 VATTR_NULL(&va);
1430 va.va_mode = 0777;
1431 error = afs_symlink(vp, link, &va, target, NULL, cred);
1432 afs_Trace4(afs_iclSetp, CM_TRACE_GSYMLINK, ICL_TYPE_POINTER, vp,
1433 ICL_TYPE_STRING, link, ICL_TYPE_STRING, target, ICL_TYPE_LONG,
1434 error);
1435 return (error);
1436 }
1437
1438
1439 int
1440 afs_gn_readdir(struct vnode *vp,
1441 struct uio *uiop,
1442 struct ucred *cred)
1443 {
1444 int error;
1445
1446 AFS_STATCNT(afs_gn_readdir);
1447 error = afs_readdir(vp, uiop, cred);
1448 afs_Trace2(afs_iclSetp, CM_TRACE_GREADDIR, ICL_TYPE_POINTER, vp,
1449 ICL_TYPE_LONG, error);
1450 return (error);
1451 }
1452
1453
/* Shared with the AFS async-I/O daemon code: the lock guarding the queue,
 * the queue of pending buffers itself, and the condition variable used to
 * wake a daemon once work has been queued. */
extern Simple_lock afs_asyncbuf_lock;
extern struct buf *afs_asyncbuf;
extern int afs_asyncbuf_cv;

/*
 * Buffers are ranked by age. A buffer's age is the value of afs_biotime
 * when the buffer is processed by afs_gn_strategy. afs_biotime is
 * incremented for each buffer. A buffer's age is kept in its av_back field.
 * The age ranking is used by the daemons, which favor older buffers.
 */
afs_int32 afs_biotime = 0;
1465
/* This function is called with a list of buffers, threaded through
 * the av_forw field. Our goal is to copy the list of buffers into the
 * afs_asyncbuf list, sorting buffers into sublists linked by the b_work field.
 * Within buffers within the same work group, the guy with the lowest address
 * has to be located at the head of the queue; his b_bcount field will also
 * be increased to cover all of the buffers in the b_work queue.
 */
#define AIX_VM_BLKSIZE 8192
/* Note: This function seems to be called as ddstrategy entry point, ie
 * has one argument. However, it also needs to be present as
 * vn_strategy entry point which has three arguments, but it seems to never
 * be called in that capacity (it would fail horribly due to the argument
 * mismatch). I'm confused, but it obviously has to be this way, maybe
 * some IBM people can shed some light on this
 */
int
afs_gn_strategy(struct buf *abp)
{
    struct buf **lbp, *tbp;
    struct buf **lwbp;
    struct buf *nbp, *qbp, *qnbp, *firstComparable;
    int doMerge;
    int oldPriority;

/* Two buffers are "comparable" (mergeable into one request) when they are
 * for the same vnode and VM subspace, have identical flags, and neither is
 * page-fault protected. */
#define EFS_COMPARABLE(x,y)  ((x)->b_vp == (y)->b_vp \
    && (x)->b_xmemd.subspace_id == (y)->b_xmemd.subspace_id \
    && (x)->b_flags == (y)->b_flags \
    && !((x)->b_flags & B_PFPROT) \
    && !((y)->b_flags & B_PFPROT))

    /* the async queue is manipulated with interrupts disabled */
    oldPriority = disable_lock(INTMAX, &afs_asyncbuf_lock);
    for (tbp = abp; tbp; tbp = nbp) {
	nbp = tbp->av_forw;	/* remember for later */
	tbp->b_work = 0;
	tbp->av_back = (struct buf *)afs_biotime++;	/* stamp the age */

	/* first insert the buffer into the afs_async queue. Insert buffer
	 * sorted within its disk position within a set of comparable buffers.
	 * Ensure that all comparable buffers are grouped contiguously.
	 * Later on, we'll merge adjacent buffers into a single request.
	 */
	firstComparable = NULL;
	lbp = &afs_asyncbuf;
	for (qbp = *lbp; qbp; lbp = &qbp->av_forw, qbp = *lbp) {
	    if (EFS_COMPARABLE(tbp, qbp)) {
		if (!firstComparable)
		    firstComparable = qbp;
		/* this buffer is comparable, so see if the next buffer
		 * is farther in the file; if it is insert before next buffer.
		 */
		if (tbp->b_blkno < qbp->b_blkno) {
		    break;
		}
	    } else {
		/* If we're at the end of a block of comparable buffers, we
		 * insert the buffer here to keep all comparable buffers
		 * contiguous.
		 */
		if (firstComparable)
		    break;
	    }
	}
	/* do the insert before qbp now */
	tbp->av_forw = *lbp;
	*lbp = tbp;
	if (firstComparable == NULL) {
	    /* next we're going to do all sorts of buffer merging tricks, but
	     * here we know we're the only COMPARABLE block in the
	     * afs_asyncbuf list, so we just skip that and continue with
	     * the next input buffer.
	     */
	    continue;
	}

	/* we may have actually added the "new" firstComparable */
	if (tbp->av_forw == firstComparable)
	    firstComparable = tbp;
	/*
	 * when we get here, firstComparable points to the first dude in the
	 * same vnode and subspace that we (tbp) are in. We go through the
	 * area of this list with COMPARABLE buffers (a contiguous region) and
	 * repeated merge buffers that are contiguous and in the same block or
	 * buffers that are contiguous and are both integral numbers of blocks.
	 * Note that our end goal is to have as big blocks as we can, but we
	 * must minimize the transfers that are not integral #s of blocks on
	 * block boundaries, since Episode will do those smaller and/or
	 * unaligned I/Os synchronously.
	 *
	 * A useful example to consider has the async queue with this in it:
	 * [8K block, 2 pages] [4K block, 1 page] [4K hole] [8K block, 2 pages]
	 * If we get a request that fills the 4K hole, we want to merge this
	 * whole mess into a 24K, 6 page transfer. If we don't, however, we
	 * don't want to do any merging since adding the 4K transfer to the 8K
	 * transfer makes the 8K transfer synchronous.
	 *
	 * Note that if there are any blocks whose size is a multiple of
	 * the file system block size, then we know that such blocks are also
	 * on block boundaries.
	 */

	doMerge = 1;		/* start the loop */
	while (doMerge) {	/* loop until an iteration doesn't
				 * make any more changes */
	    doMerge = 0;
	    for (qbp = firstComparable;; qbp = qnbp) {
		qnbp = qbp->av_forw;
		if (!qnbp)
		    break;	/* we're done */
		if (!EFS_COMPARABLE(qbp, qnbp))
		    break;

		/* try to merge qbp and qnbp */

		/* first check if both not adjacent go on to next region */
		if ((dbtob(qbp->b_blkno) + qbp->b_bcount) !=
		    dbtob(qnbp->b_blkno))
		    continue;

		/* note if both in the same block, the first byte of leftmost guy
		 * and last byte of rightmost guy are in the same block.
		 */
		if ((dbtob(qbp->b_blkno) & ~(AIX_VM_BLKSIZE - 1)) ==
		    ((dbtob(qnbp->b_blkno) + qnbp->b_bcount -
		      1) & ~(AIX_VM_BLKSIZE - 1))) {
		    doMerge = 1;	/* both in same block */
		} else if ((qbp->b_bcount & (AIX_VM_BLKSIZE - 1)) == 0
			   && (qnbp->b_bcount & (AIX_VM_BLKSIZE - 1)) == 0) {
		    doMerge = 1;	/* both integral #s of blocks */
		}
		if (doMerge) {
		    struct buf *xbp;

		    /* merge both of these blocks together */
		    /* first set age to the older of the two */
		    if ((int32long64_t) qnbp->av_back -
			(int32long64_t) qbp->av_back < 0) {
			qbp->av_back = qnbp->av_back;
		    }
		    lwbp = (struct buf **) &qbp->b_work;
		    /* find end of qbp's work queue */
		    for (xbp = *lwbp; xbp;
			 lwbp = (struct buf **) &xbp->b_work, xbp = *lwbp);
		    /*
		     * now setting *lwbp will change the last ptr in the qbp's
		     * work chain
		     */
		    qbp->av_forw = qnbp->av_forw;	/* splice out qnbp */
		    qbp->b_bcount += qnbp->b_bcount;	/* fix count */
		    *lwbp = qnbp;	/* append qnbp to end */
		    /*
		     * note that qnbp is bogus, but it doesn't matter because
		     * we're going to restart the for loop now.
		     */
		    break;	/* out of the for loop */
		}
	    }
	}
    }				/* for loop for all interrupt data */
    /* at this point, all I/O has been queued. Wakeup the daemon */
    e_wakeup_one((int *)&afs_asyncbuf_cv);
    unlock_enable(oldPriority, &afs_asyncbuf_lock);
    return 0;
}
1629
1630
1631 int
1632 afs_inactive(struct vcache *avc,
1633 afs_ucred_t *acred)
1634 {
1635 afs_InactiveVCache(avc, acred);
1636 }
1637
1638 int
1639 afs_gn_revoke(struct vnode *vp,
1640 int32long64_t cmd,
1641 int32long64_t flag,
1642 struct vattr *vinfop,
1643 struct ucred *crp)
1644 {
1645 AFS_STATCNT(afs_gn_revoke);
1646 /* NO SUPPORT for this in afs YET! */
1647 return (EOPNOTSUPP);
1648 }
1649
/*
 * afs_gn_getacl -- AIX ACL fetch entry point.  AFS uses its own ACL model,
 * so the AIX-style call is unimplemented; always fail with ENOSYS.
 * Fix: dropped the stray ';' after the function body, which is not valid
 * ISO C at file scope.
 */
int
afs_gn_getacl(struct vnode *vp,
	      struct uio *uiop,
	      struct ucred *cred)
{
    return ENOSYS;
}
1657
1658
/*
 * afs_gn_setacl -- AIX ACL store entry point; unimplemented in AFS, always
 * fails with ENOSYS.  Fix: dropped the stray ';' after the function body.
 */
int
afs_gn_setacl(struct vnode *vp,
	      struct uio *uiop,
	      struct ucred *cred)
{
    return ENOSYS;
}
1666
1667
/*
 * afs_gn_getpcl -- AIX privilege-control-list fetch entry point;
 * unimplemented in AFS, always fails with ENOSYS.
 * Fix: dropped the stray ';' after the function body.
 */
int
afs_gn_getpcl(struct vnode *vp,
	      struct uio *uiop,
	      struct ucred *cred)
{
    return ENOSYS;
}
1675
1676
/*
 * afs_gn_setpcl -- AIX privilege-control-list store entry point;
 * unimplemented in AFS, always fails with ENOSYS.
 * Fix: dropped the stray ';' after the function body.
 */
int
afs_gn_setpcl(struct vnode *vp,
	      struct uio *uiop,
	      struct ucred *cred)
{
    return ENOSYS;
}
1684
1685
1686 int
1687 afs_gn_seek(struct vnode* vp, offset_t * offp, struct ucred * crp)
1688 {
1689 /*
1690 * File systems which do not wish to do offset validation can simply
1691 * return 0. File systems which do not provide the vn_seek entry point
1692 * will have a maximum offset of OFF_MAX (2 gigabytes minus 1) enforced
1693 * by the logical file system.
1694 */
1695 return 0;
1696 }
1697
1698
/*
 * afs_gn_enosys -- shared stub installed (via casts) into every vnodeops
 * and pager slot that AFS does not implement; always returns ENOSYS.
 */
int
afs_gn_enosys()
{
    return (ENOSYS);
}
1704
1705 /*
1706 * declare a struct vnodeops and initialize it with ptrs to all functions
1707 */
1708 struct vnodeops afs_gn_vnodeops = {
1709 /* creation/naming/deletion */
1710 afs_gn_link,
1711 afs_gn_mkdir,
1712 afs_gn_mknod,
1713 afs_gn_remove,
1714 afs_gn_rename,
1715 afs_gn_rmdir,
1716 /* lookup, file handle stuff */
1717 afs_gn_lookup,
1718 (int(*)(struct vnode*,struct fileid*,struct ucred*))
1719 afs_gn_fid,
1720 /* access to files */
1721 (int(*)(struct vnode *, int32long64_t, ext_t, caddr_t *,struct ucred *))
1722 afs_gn_open,
1723 (int(*)(struct vnode *, struct vnode **, int32long64_t,caddr_t, int32long64_t, caddr_t *, struct ucred *))
1724 afs_gn_create,
1725 afs_gn_hold,
1726 afs_gn_rele,
1727 afs_gn_close,
1728 afs_gn_map,
1729 afs_gn_unmap,
1730 /* manipulate attributes of files */
1731 afs_gn_access,
1732 afs_gn_getattr,
1733 afs_gn_setattr,
1734 /* data update operations */
1735 afs_gn_fclear,
1736 afs_gn_fsync,
1737 afs_gn_ftrunc,
1738 afs_gn_rdwr,
1739 afs_gn_lockctl,
1740 /* extensions */
1741 afs_gn_ioctl,
1742 afs_gn_readlink,
1743 afs_gn_select,
1744 afs_gn_symlink,
1745 afs_gn_readdir,
1746 /* buffer ops */
1747 (int(*)(struct vnode*,struct buf*,struct ucred*))
1748 afs_gn_strategy,
1749 /* security things */
1750 afs_gn_revoke,
1751 afs_gn_getacl,
1752 afs_gn_setacl,
1753 afs_gn_getpcl,
1754 afs_gn_setpcl,
1755 afs_gn_seek,
1756 (int(*)(struct vnode *, int32long64_t, int32long64_t, offset_t, offset_t, struct ucred *))
1757 afs_gn_enosys, /* vn_fsync_range */
1758 (int(*)(struct vnode *, struct vnode **, int32long64_t, char *, struct vattr *, int32long64_t, caddr_t *, struct ucred *))
1759 afs_gn_enosys, /* vn_create_attr */
1760 (int(*)(struct vnode *, int32long64_t, void *, size_t, struct ucred *))
1761 afs_gn_enosys, /* vn_finfo */
1762 (int(*)(struct vnode *, caddr_t, offset_t, offset_t, uint32long64_t, uint32long64_t, struct ucred *))
1763 afs_gn_enosys, /* vn_map_lloff */
1764 (int(*)(struct vnode*,struct uio*,int*,struct ucred*))
1765 afs_gn_enosys, /* vn_readdir_eofp */
1766 (int(*)(struct vnode *, enum uio_rw, int32long64_t, struct uio *, ext_t , caddr_t, struct vattr *, struct vattr *, struct ucred *))
1767 afs_gn_enosys, /* vn_rdwr_attr */
1768 (int(*)(struct vnode*,int,void*,struct ucred*))
1769 afs_gn_enosys, /* vn_memcntl */
1770 #ifdef AFS_AIX53_ENV /* Present in AIX 5.3 and up */
1771 (int(*)(struct vnode*,const char*,struct uio*,struct ucred*))
1772 afs_gn_enosys, /* vn_getea */
1773 (int(*)(struct vnode*,const char*,struct uio*,int,struct ucred*))
1774 afs_gn_enosys, /* vn_setea */
1775 (int(*)(struct vnode *, struct uio *, struct ucred *))
1776 afs_gn_enosys, /* vn_listea */
1777 (int(*)(struct vnode *, const char *, struct ucred *))
1778 afs_gn_enosys, /* vn_removeea */
1779 (int(*)(struct vnode *, const char *, struct vattr *, struct ucred *))
1780 afs_gn_enosys, /* vn_statea */
1781 (int(*)(struct vnode *, uint64_t, acl_type_t *, struct uio *, size_t *, mode_t *, struct ucred *))
1782 afs_gn_enosys, /* vn_getxacl */
1783 (int(*)(struct vnode *, uint64_t, acl_type_t, struct uio *, mode_t, struct ucred *))
1784 afs_gn_enosys, /* vn_setxacl */
1785 #else /* AFS_AIX53_ENV */
1786 afs_gn_enosys, /* vn_spare7 */
1787 afs_gn_enosys, /* vn_spare8 */
1788 afs_gn_enosys, /* vn_spare9 */
1789 afs_gn_enosys, /* vn_spareA */
1790 afs_gn_enosys, /* vn_spareB */
1791 afs_gn_enosys, /* vn_spareC */
1792 afs_gn_enosys, /* vn_spareD */
1793 #endif /* AFS_AIX53_ENV */
1794 afs_gn_enosys, /* vn_spareE */
1795 afs_gn_enosys /* vn_spareF */
1796 #ifdef AFS_AIX51_ENV
1797 ,(int(*)(struct gnode*,long long,char*,unsigned long*, unsigned long*,unsigned int*))
1798 afs_gn_enosys, /* pagerBackRange */
1799 (int64_t(*)(struct gnode*))
1800 afs_gn_enosys, /* pagerGetFileSize */
1801 (void(*)(struct gnode *, vpn_t, vpn_t *, vpn_t *, vpn_t *, boolean_t))
1802 afs_gn_enosys, /* pagerReadAhead */
1803 (void(*)(struct gnode *, int64_t, int64_t, uint))
1804 afs_gn_enosys, /* pagerReadWriteBehind */
1805 (void(*)(struct gnode*,long long,unsigned long,unsigned long,unsigned int))
1806 afs_gn_enosys /* pagerEndCopy */
1807 #endif
1808 };
1809 struct vnodeops *afs_ops = &afs_gn_vnodeops;
1810
1811
1812
1813 extern struct vfsops Afs_vfsops;
1814 extern int Afs_init();
1815
1816 #define AFS_CALLOUT_TBL_SIZE 256
1817
1818 /*
1819 * the following additional layer of gorp is due to the fact that the
1820 * filesystem layer no longer obtains the kernel lock for me. I was relying
1821 * on this behavior to avoid having to think about locking.
1822 */
1823
1824 static
1825 vfs_mount(struct vfs *a, struct ucred *b)
1826 {
1827 int glockOwner, ret;
1828
1829 glockOwner = ISAFS_GLOCK();
1830 if (!glockOwner)
1831 AFS_GLOCK();
1832 ret = (*Afs_vfsops.vfs_mount) (a, b);
1833 if (!glockOwner)
1834 AFS_GUNLOCK();
1835
1836 return ret;
1837 }
1838
1839 static
1840 vfs_unmount(struct vfs *a, int b, struct ucred *c)
1841 {
1842 int glockOwner, ret;
1843
1844 glockOwner = ISAFS_GLOCK();
1845 if (!glockOwner)
1846 AFS_GLOCK();
1847 ret = (*Afs_vfsops.vfs_unmount) (a, b, c);
1848 if (!glockOwner)
1849 AFS_GUNLOCK();
1850
1851 return ret;
1852 }
1853
1854 static
1855 vfs_root(struct vfs *a, struct vnode **b, struct ucred *c)
1856 {
1857 int glockOwner, ret;
1858
1859 glockOwner = ISAFS_GLOCK();
1860 if (!glockOwner)
1861 AFS_GLOCK();
1862 ret = (*Afs_vfsops.vfs_root) (a, b, c);
1863 if (!glockOwner)
1864 AFS_GUNLOCK();
1865
1866 return ret;
1867 }
1868
1869 static
1870 vfs_statfs(struct vfs *a, struct statfs *b, struct ucred *c)
1871 {
1872 int glockOwner, ret;
1873
1874 glockOwner = ISAFS_GLOCK();
1875 if (!glockOwner)
1876 AFS_GLOCK();
1877 ret = (*Afs_vfsops.vfs_statfs) (a, b, c);
1878 if (!glockOwner)
1879 AFS_GUNLOCK();
1880
1881 return ret;
1882 }
1883
1884 static
1885 vfs_sync(struct gfs *a)
1886 {
1887 int glockOwner, ret;
1888
1889 glockOwner = ISAFS_GLOCK();
1890 if (!glockOwner)
1891 AFS_GLOCK();
1892 ret = (*Afs_vfsops.vfs_sync) (a);
1893 if (!glockOwner)
1894 AFS_GUNLOCK();
1895 return ret;
1896 }
1897
1898 static
1899 vfs_vget(struct vfs *a, struct vnode **b, struct fileid *c, struct ucred *d)
1900 {
1901 int glockOwner, ret;
1902
1903 glockOwner = ISAFS_GLOCK();
1904 if (!glockOwner)
1905 AFS_GLOCK();
1906 ret = (*Afs_vfsops.vfs_vget) (a, b, c, d);
1907 if (!glockOwner)
1908 AFS_GUNLOCK();
1909
1910 return ret;
1911 }
1912
1913 static
1914 vfs_cntl(struct vfs *a, int b, caddr_t c, size_t d, struct ucred *e)
1915 {
1916 int glockOwner, ret;
1917
1918 glockOwner = ISAFS_GLOCK();
1919 if (!glockOwner)
1920 AFS_GLOCK();
1921 ret = (*Afs_vfsops.vfs_cntl) (a, b, c, d, e);
1922 if (!glockOwner)
1923 AFS_GUNLOCK();
1924
1925 return ret;
1926 }
1927
1928 static
1929 vfs_quotactl(struct vfs *a, int b, uid_t c, caddr_t d, struct ucred *e)
1930 {
1931 int glockOwner, ret;
1932
1933 glockOwner = ISAFS_GLOCK();
1934 if (!glockOwner)
1935 AFS_GLOCK();
1936 ret = (*Afs_vfsops.vfs_quotactl) (a, b, c, d, e);
1937 if (!glockOwner)
1938 AFS_GUNLOCK();
1939
1940 return ret;
1941 }
1942
#ifdef AFS_AIX51_ENV
/* GLOCK wrapper for Afs_vfsops.vfs_syncvfs (AIX 5.1+ only).
 * Fix: explicit 'int' return type (implicit int is invalid since C99). */
static int
vfs_syncvfs(struct gfs *a, struct vfs *b, int c, struct ucred *d)
{
    int glockOwner, ret;

    glockOwner = ISAFS_GLOCK();
    if (!glockOwner)
	AFS_GLOCK();
    ret = (*Afs_vfsops.vfs_syncvfs) (a, b, c, d);
    if (!glockOwner)
	AFS_GUNLOCK();

    return ret;
}
#endif
1959
1960
/* vfsops table whose entries are the GLOCK-taking wrappers above; installed
 * in place of Afs_vfsops so every vfs call enters with the AFS global lock
 * held.  Slot order is the AIX vfsops ABI -- do not reorder. */
struct vfsops locked_Afs_vfsops = {
    vfs_mount,
    vfs_unmount,
    vfs_root,
    vfs_statfs,
    vfs_sync,
    vfs_vget,
    vfs_cntl,
    vfs_quotactl,
#ifdef AFS_AIX51_ENV
    vfs_syncvfs
#endif
};
1974
1975 static
1976 vn_link(struct vnode *a, struct vnode *b, char *c, struct ucred *d)
1977 {
1978 int glockOwner, ret;
1979
1980 glockOwner = ISAFS_GLOCK();
1981 if (!glockOwner)
1982 AFS_GLOCK();
1983 ret = (*afs_gn_vnodeops.vn_link) (a, b, c, d);
1984 if (!glockOwner)
1985 AFS_GUNLOCK();
1986
1987 return ret;
1988 }
1989
1990 static
1991 vn_mkdir(struct vnode *a, char *b, int32long64_t c, struct ucred *d)
1992 {
1993 int glockOwner, ret;
1994
1995 glockOwner = ISAFS_GLOCK();
1996 if (!glockOwner)
1997 AFS_GLOCK();
1998 ret = (*afs_gn_vnodeops.vn_mkdir) (a, b, c, d);
1999 if (!glockOwner)
2000 AFS_GUNLOCK();
2001
2002 return ret;
2003 }
2004
2005 static
2006 vn_mknod(struct vnode *a, caddr_t b, int32long64_t c, dev_t d,
2007 struct ucred *e)
2008 {
2009 int glockOwner, ret;
2010
2011 glockOwner = ISAFS_GLOCK();
2012 if (!glockOwner)
2013 AFS_GLOCK();
2014 ret = (*afs_gn_vnodeops.vn_mknod) (a, b, c, d, e);
2015 if (!glockOwner)
2016 AFS_GUNLOCK();
2017
2018 return ret;
2019 }
2020
2021 static
2022 vn_remove(struct vnode *a, struct vnode *b, char *c, struct ucred *d)
2023 {
2024 int glockOwner, ret;
2025
2026 glockOwner = ISAFS_GLOCK();
2027 if (!glockOwner)
2028 AFS_GLOCK();
2029 ret = (*afs_gn_vnodeops.vn_remove) (a, b, c, d);
2030 if (!glockOwner)
2031 AFS_GUNLOCK();
2032
2033 return ret;
2034 }
2035
2036 static
2037 vn_rename(struct vnode *a, struct vnode *b, caddr_t c, struct vnode *d,
2038 struct vnode *e, caddr_t f, struct ucred *g)
2039 {
2040 int glockOwner, ret;
2041
2042 glockOwner = ISAFS_GLOCK();
2043 if (!glockOwner)
2044 AFS_GLOCK();
2045 ret = (*afs_gn_vnodeops.vn_rename) (a, b, c, d, e, f, g);
2046 if (!glockOwner)
2047 AFS_GUNLOCK();
2048
2049 return ret;
2050 }
2051
2052 static
2053 vn_rmdir(struct vnode *a, struct vnode *b, char *c, struct ucred *d)
2054 {
2055 int glockOwner, ret;
2056
2057 glockOwner = ISAFS_GLOCK();
2058 if (!glockOwner)
2059 AFS_GLOCK();
2060 ret = (*afs_gn_vnodeops.vn_rmdir) (a, b, c, d);
2061 if (!glockOwner)
2062 AFS_GUNLOCK();
2063
2064 return ret;
2065 }
2066
2067 static
2068 vn_lookup(struct vnode *a, struct vnode **b, char *c, int32long64_t d,
2069 struct vattr *v, struct ucred *e)
2070 {
2071 int glockOwner, ret;
2072
2073 glockOwner = ISAFS_GLOCK();
2074 if (!glockOwner)
2075 AFS_GLOCK();
2076 ret = (*afs_gn_vnodeops.vn_lookup) (a, b, c, d, v, e);
2077 if (!glockOwner)
2078 AFS_GUNLOCK();
2079
2080 return ret;
2081 }
2082
2083 static
2084 vn_fid(struct vnode *a, struct fileid *b, struct ucred *c)
2085 {
2086 int glockOwner, ret;
2087
2088 glockOwner = ISAFS_GLOCK();
2089 if (!glockOwner)
2090 AFS_GLOCK();
2091 ret = (*afs_gn_vnodeops.vn_fid) (a, b, c);
2092 if (!glockOwner)
2093 AFS_GUNLOCK();
2094
2095 return ret;
2096 }
2097
2098 static
2099 vn_open(struct vnode *a,
2100 int32long64_t b,
2101 ext_t c,
2102 caddr_t * d,
2103 struct ucred *e)
2104 {
2105 int glockOwner, ret;
2106
2107 glockOwner = ISAFS_GLOCK();
2108 if (!glockOwner)
2109 AFS_GLOCK();
2110 ret = (*afs_gn_vnodeops.vn_open) (a, b, c, d, e);
2111 if (!glockOwner)
2112 AFS_GUNLOCK();
2113
2114 return ret;
2115 }
2116
2117 static
2118 vn_create(struct vnode *a, struct vnode **b, int32long64_t c, caddr_t d,
2119 int32long64_t e, caddr_t * f, struct ucred *g)
2120 {
2121 int glockOwner, ret;
2122
2123 glockOwner = ISAFS_GLOCK();
2124 if (!glockOwner)
2125 AFS_GLOCK();
2126 ret = (*afs_gn_vnodeops.vn_create) (a, b, c, d, e, f, g);
2127 if (!glockOwner)
2128 AFS_GUNLOCK();
2129
2130 return ret;
2131 }
2132
2133 static
2134 vn_hold(struct vnode *a)
2135 {
2136 int glockOwner, ret;
2137
2138 glockOwner = ISAFS_GLOCK();
2139 if (!glockOwner)
2140 AFS_GLOCK();
2141 ret = (*afs_gn_vnodeops.vn_hold) (a);
2142 if (!glockOwner)
2143 AFS_GUNLOCK();
2144
2145 return ret;
2146 }
2147
2148 static
2149 vn_rele(struct vnode *a)
2150 {
2151 int glockOwner, ret;
2152
2153 glockOwner = ISAFS_GLOCK();
2154 if (!glockOwner)
2155 AFS_GLOCK();
2156 ret = (*afs_gn_vnodeops.vn_rele) (a);
2157 if (!glockOwner)
2158 AFS_GUNLOCK();
2159
2160 return ret;
2161 }
2162
2163 static
2164 vn_close(struct vnode *a, int32long64_t b, caddr_t c, struct ucred *d)
2165 {
2166 int glockOwner, ret;
2167
2168 glockOwner = ISAFS_GLOCK();
2169 if (!glockOwner)
2170 AFS_GLOCK();
2171 ret = (*afs_gn_vnodeops.vn_close) (a, b, c, d);
2172 if (!glockOwner)
2173 AFS_GUNLOCK();
2174
2175 return ret;
2176 }
2177
2178 static
2179 vn_map(struct vnode *a, caddr_t b, uint32long64_t c, uint32long64_t d,
2180 uint32long64_t e, struct ucred *f)
2181 {
2182 int glockOwner, ret;
2183
2184 glockOwner = ISAFS_GLOCK();
2185 if (!glockOwner)
2186 AFS_GLOCK();
2187 ret = (*afs_gn_vnodeops.vn_map) (a, b, c, d, e, f);
2188 if (!glockOwner)
2189 AFS_GUNLOCK();
2190
2191 return ret;
2192 }
2193
2194 static
2195 vn_unmap(struct vnode *a, int32long64_t b, struct ucred *c)
2196 {
2197 int glockOwner, ret;
2198
2199 glockOwner = ISAFS_GLOCK();
2200 if (!glockOwner)
2201 AFS_GLOCK();
2202 ret = (*afs_gn_vnodeops.vn_unmap) (a, b, c);
2203 if (!glockOwner)
2204 AFS_GUNLOCK();
2205
2206 return ret;
2207 }
2208
2209 static
2210 vn_access(struct vnode *a, int32long64_t b, int32long64_t c, struct ucred *d)
2211 {
2212 int glockOwner, ret;
2213
2214 glockOwner = ISAFS_GLOCK();
2215 if (!glockOwner)
2216 AFS_GLOCK();
2217 ret = (*afs_gn_vnodeops.vn_access) (a, b, c, d);
2218 if (!glockOwner)
2219 AFS_GUNLOCK();
2220
2221 return ret;
2222 }
2223
2224 static
2225 vn_getattr(struct vnode *a, struct vattr *b, struct ucred *c)
2226 {
2227 int glockOwner, ret;
2228
2229 glockOwner = ISAFS_GLOCK();
2230 if (!glockOwner)
2231 AFS_GLOCK();
2232 ret = (*afs_gn_vnodeops.vn_getattr) (a, b, c);
2233 if (!glockOwner)
2234 AFS_GUNLOCK();
2235
2236 return ret;
2237 }
2238
2239 static
2240 vn_setattr(struct vnode *a, int32long64_t b, int32long64_t c, int32long64_t d,
2241 int32long64_t e, struct ucred *f)
2242 {
2243 int glockOwner, ret;
2244
2245 glockOwner = ISAFS_GLOCK();
2246 if (!glockOwner)
2247 AFS_GLOCK();
2248 ret = (*afs_gn_vnodeops.vn_setattr) (a, b, c, d, e, f);
2249 if (!glockOwner)
2250 AFS_GUNLOCK();
2251
2252 return ret;
2253 }
2254
2255 static
2256 vn_fclear(struct vnode *a, int32long64_t b, offset_t c, offset_t d
2257 , caddr_t e, struct ucred *f)
2258 {
2259 int glockOwner, ret;
2260
2261 glockOwner = ISAFS_GLOCK();
2262 if (!glockOwner)
2263 AFS_GLOCK();
2264 ret = (*afs_gn_vnodeops.vn_fclear) (a, b, c, d, e, f);
2265 if (!glockOwner)
2266 AFS_GUNLOCK();
2267
2268 return ret;
2269 }
2270
2271 static
2272 vn_fsync(struct vnode *a, int32long64_t b, int32long64_t c, struct ucred *d)
2273 {
2274 int glockOwner, ret;
2275
2276 glockOwner = ISAFS_GLOCK();
2277 if (!glockOwner)
2278 AFS_GLOCK();
2279 ret = (*afs_gn_vnodeops.vn_fsync) (a, b, c, d);
2280 if (!glockOwner)
2281 AFS_GUNLOCK();
2282
2283 return ret;
2284 }
2285
2286 static
2287 vn_ftrunc(struct vnode *a, int32long64_t b, offset_t c, caddr_t d,
2288 struct ucred *e)
2289 {
2290 int glockOwner, ret;
2291
2292 glockOwner = ISAFS_GLOCK();
2293 if (!glockOwner)
2294 AFS_GLOCK();
2295 ret = (*afs_gn_vnodeops.vn_ftrunc) (a, b, c, d, e);
2296 if (!glockOwner)
2297 AFS_GUNLOCK();
2298
2299 return ret;
2300 }
2301
2302 static
2303 vn_rdwr(struct vnode *a, enum uio_rw b, int32long64_t c, struct uio *d,
2304 ext_t e, caddr_t f, struct vattr *v, struct ucred *g)
2305 {
2306 int glockOwner, ret;
2307
2308 glockOwner = ISAFS_GLOCK();
2309 if (!glockOwner)
2310 AFS_GLOCK();
2311 ret = (*afs_gn_vnodeops.vn_rdwr) (a, b, c, d, e, f, v, g);
2312 if (!glockOwner)
2313 AFS_GUNLOCK();
2314
2315 return ret;
2316 }
2317
2318 static
2319 vn_lockctl(struct vnode *a,
2320 offset_t b,
2321 struct eflock *c,
2322 int32long64_t d,
2323 int (*e) (),
2324 #ifdef AFS_AIX52_ENV /* Changed in AIX 5.2 and up */
2325 ulong * f,
2326 #else /* AFS_AIX52_ENV */
2327 ulong32int64_t * f,
2328 #endif /* AFS_AIX52_ENV */
2329 struct ucred *g)
2330 {
2331 int glockOwner, ret;
2332
2333 glockOwner = ISAFS_GLOCK();
2334 if (!glockOwner)
2335 AFS_GLOCK();
2336 ret = (*afs_gn_vnodeops.vn_lockctl) (a, b, c, d, e, f, g);
2337 if (!glockOwner)
2338 AFS_GUNLOCK();
2339
2340 return ret;
2341 }
2342
2343 static
2344 vn_ioctl(struct vnode *a, int32long64_t b, caddr_t c, size_t d, ext_t e,
2345 struct ucred *f)
2346 {
2347 int glockOwner, ret;
2348
2349 glockOwner = ISAFS_GLOCK();
2350 if (!glockOwner)
2351 AFS_GLOCK();
2352 ret = (*afs_gn_vnodeops.vn_ioctl) (a, b, c, d, e, f);
2353 if (!glockOwner)
2354 AFS_GUNLOCK();
2355
2356 return ret;
2357 }
2358
2359 static
2360 vn_readlink(struct vnode *a, struct uio *b, struct ucred *c)
2361 {
2362 int glockOwner, ret;
2363
2364 glockOwner = ISAFS_GLOCK();
2365 if (!glockOwner)
2366 AFS_GLOCK();
2367 ret = (*afs_gn_vnodeops.vn_readlink) (a, b, c);
2368 if (!glockOwner)
2369 AFS_GUNLOCK();
2370
2371 return ret;
2372 }
2373
2374 static
2375 vn_select(struct vnode *a, int32long64_t b, ushort c, ushort * d,
2376 void (*e) (), caddr_t f, struct ucred *g)
2377 {
2378 int glockOwner, ret;
2379
2380 glockOwner = ISAFS_GLOCK();
2381 if (!glockOwner)
2382 AFS_GLOCK();
2383 ret = (*afs_gn_vnodeops.vn_select) (a, b, c, d, e, f, g);
2384 if (!glockOwner)
2385 AFS_GUNLOCK();
2386
2387 return ret;
2388 }
2389
2390 static
2391 vn_symlink(struct vnode *a, char *b, char *c, struct ucred *d)
2392 {
2393 int glockOwner, ret;
2394
2395 glockOwner = ISAFS_GLOCK();
2396 if (!glockOwner)
2397 AFS_GLOCK();
2398 ret = (*afs_gn_vnodeops.vn_symlink) (a, b, c, d);
2399 if (!glockOwner)
2400 AFS_GUNLOCK();
2401
2402 return ret;
2403 }
2404
2405 static
2406 vn_readdir(struct vnode *a, struct uio *b, struct ucred *c)
2407 {
2408 int glockOwner, ret;
2409
2410 glockOwner = ISAFS_GLOCK();
2411 if (!glockOwner)
2412 AFS_GLOCK();
2413 ret = (*afs_gn_vnodeops.vn_readdir) (a, b, c);
2414 if (!glockOwner)
2415 AFS_GUNLOCK();
2416
2417 return ret;
2418 }
2419
2420 static
2421 vn_revoke(struct vnode *a, int32long64_t b, int32long64_t c, struct vattr *d,
2422 struct ucred *e)
2423 {
2424 int glockOwner, ret;
2425
2426 glockOwner = ISAFS_GLOCK();
2427 if (!glockOwner)
2428 AFS_GLOCK();
2429 ret = (*afs_gn_vnodeops.vn_revoke) (a, b, c, d, e);
2430 if (!glockOwner)
2431 AFS_GUNLOCK();
2432
2433 return ret;
2434 }
2435
2436 static
2437 vn_getacl(struct vnode *a, struct uio *b, struct ucred *c)
2438 {
2439 int glockOwner, ret;
2440
2441 glockOwner = ISAFS_GLOCK();
2442 if (!glockOwner)
2443 AFS_GLOCK();
2444 ret = (*afs_gn_vnodeops.vn_getacl) (a, b, c);
2445 if (!glockOwner)
2446 AFS_GUNLOCK();
2447
2448 return ret;
2449 }
2450
2451 static
2452 vn_setacl(struct vnode *a, struct uio *b, struct ucred *c)
2453 {
2454 int glockOwner, ret;
2455
2456 glockOwner = ISAFS_GLOCK();
2457 if (!glockOwner)
2458 AFS_GLOCK();
2459 ret = (*afs_gn_vnodeops.vn_setacl) (a, b, c);
2460 if (!glockOwner)
2461 AFS_GUNLOCK();
2462
2463 return ret;
2464 }
2465
2466 static
2467 vn_getpcl(struct vnode *a, struct uio *b, struct ucred *c)
2468 {
2469 int glockOwner, ret;
2470
2471 glockOwner = ISAFS_GLOCK();
2472 if (!glockOwner)
2473 AFS_GLOCK();
2474 ret = (*afs_gn_vnodeops.vn_getpcl) (a, b, c);
2475 if (!glockOwner)
2476 AFS_GUNLOCK();
2477
2478 return ret;
2479 }
2480
2481 static
2482 vn_setpcl(struct vnode *a, struct uio *b, struct ucred *c)
2483 {
2484 int glockOwner, ret;
2485
2486 glockOwner = ISAFS_GLOCK();
2487 if (!glockOwner)
2488 AFS_GLOCK();
2489 ret = (*afs_gn_vnodeops.vn_setpcl) (a, b, c);
2490 if (!glockOwner)
2491 AFS_GUNLOCK();
2492
2493 return ret;
2494 }
2495
2496
/*
 * Vnode operations table installed for AFS on AIX.  Each entry is one of the
 * glock-acquiring vn_* wrappers above, so the AFS global lock is held across
 * every vnode operation.  This is a positional initializer: the entry order
 * must exactly match the struct vnodeops declaration in the AIX kernel
 * headers, including the members that are conditional on AIX version
 * (AFS_AIX53_ENV / AFS_AIX51_ENV below).  Unimplemented slots are filled
 * with afs_gn_enosys, cast to the slot's exact function-pointer type.
 */
struct vnodeops locked_afs_gn_vnodeops = {
    vn_link,
    vn_mkdir,
    vn_mknod,
    vn_remove,
    vn_rename,
    vn_rmdir,
    vn_lookup,
    vn_fid,
    vn_open,
    vn_create,
    vn_hold,
    vn_rele,
    vn_close,
    vn_map,
    vn_unmap,
    vn_access,
    vn_getattr,
    vn_setattr,
    vn_fclear,
    vn_fsync,
    vn_ftrunc,
    vn_rdwr,
    vn_lockctl,
    vn_ioctl,
    vn_readlink,
    vn_select,
    vn_symlink,
    vn_readdir,
    /* vn_strategy: called directly, NOT through a glock wrapper */
    (int(*)(struct vnode*,struct buf*,struct ucred*))
	afs_gn_strategy,	/* no locking!!! (discovered the hard way) */
    vn_revoke,
    vn_getacl,
    vn_setacl,
    vn_getpcl,
    vn_setpcl,
    afs_gn_seek,
    (int(*)(struct vnode *, int32long64_t, int32long64_t, offset_t, offset_t, struct ucred *))
	afs_gn_enosys,		/* vn_fsync_range */
    (int(*)(struct vnode *, struct vnode **, int32long64_t, char *, struct vattr *, int32long64_t, caddr_t *, struct ucred *))
	afs_gn_enosys,		/* vn_create_attr */
    (int(*)(struct vnode *, int32long64_t, void *, size_t, struct ucred *))
	afs_gn_enosys,		/* vn_finfo */
    (int(*)(struct vnode *, caddr_t, offset_t, offset_t, uint32long64_t, uint32long64_t, struct ucred *))
	afs_gn_enosys,		/* vn_map_lloff */
    (int(*)(struct vnode*,struct uio*,int*,struct ucred*))
	afs_gn_enosys,		/* vn_readdir_eofp */
    (int(*)(struct vnode *, enum uio_rw, int32long64_t, struct uio *, ext_t , caddr_t, struct vattr *, struct vattr *, struct ucred *))
	afs_gn_enosys,		/* vn_rdwr_attr */
    (int(*)(struct vnode*,int,void*,struct ucred*))
	afs_gn_enosys,		/* vn_memcntl */
#ifdef AFS_AIX53_ENV		/* Present in AIX 5.3 and up */
    /* Extended-attribute and extended-ACL slots added in AIX 5.3 */
    (int(*)(struct vnode*,const char*,struct uio*,struct ucred*))
	afs_gn_enosys,		/* vn_getea */
    (int(*)(struct vnode*,const char*,struct uio*,int,struct ucred*))
	afs_gn_enosys,		/* vn_setea */
    (int(*)(struct vnode *, struct uio *, struct ucred *))
	afs_gn_enosys,		/* vn_listea */
    (int(*)(struct vnode *, const char *, struct ucred *))
	afs_gn_enosys,		/* vn_removeea */
    (int(*)(struct vnode *, const char *, struct vattr *, struct ucred *))
	afs_gn_enosys,		/* vn_statea */
    (int(*)(struct vnode *, uint64_t, acl_type_t *, struct uio *, size_t *, mode_t *, struct ucred *))
	afs_gn_enosys,		/* vn_getxacl */
    (int(*)(struct vnode *, uint64_t, acl_type_t, struct uio *, mode_t, struct ucred *))
	afs_gn_enosys,		/* vn_setxacl */
#else /* AFS_AIX53_ENV */
    afs_gn_enosys,		/* vn_spare7 */
    afs_gn_enosys,		/* vn_spare8 */
    afs_gn_enosys,		/* vn_spare9 */
    afs_gn_enosys,		/* vn_spareA */
    afs_gn_enosys,		/* vn_spareB */
    afs_gn_enosys,		/* vn_spareC */
    afs_gn_enosys,		/* vn_spareD */
#endif /* AFS_AIX53_ENV */
    afs_gn_enosys,		/* vn_spareE */
    afs_gn_enosys		/* vn_spareF */
#ifdef AFS_AIX51_ENV
    /* Pager callback slots added in AIX 5.1; all unimplemented */
    ,(int(*)(struct gnode*,long long,char*,unsigned long*, unsigned long*,unsigned int*))
	afs_gn_enosys,		/* pagerBackRange */
    (int64_t(*)(struct gnode*))
	afs_gn_enosys,		/* pagerGetFileSize */
    (void(*)(struct gnode *, vpn_t, vpn_t *, vpn_t *, vpn_t *, boolean_t))
	afs_gn_enosys,		/* pagerReadAhead */
    (void(*)(struct gnode *, int64_t, int64_t, uint))
	afs_gn_enosys,		/* pagerReadWriteBehind */
    (void(*)(struct gnode*,long long,unsigned long,unsigned long,unsigned int))
	afs_gn_enosys		/* pagerEndCopy */
#endif
};
2587
/*
 * Generic file system (gfs) registration record for AFS, passed to the AIX
 * gfsadd() machinery.  Positional initializer; field order must match the
 * AIX struct gfs declaration.
 */
struct gfs afs_gfs = {
    &locked_Afs_vfsops,		/* gfs_ops: glock-wrapped vfs operations */
    &locked_afs_gn_vnodeops,	/* gn_ops: glock-wrapped vnode operations (above) */
    AFS_MOUNT_AFS,		/* gfs_type: file system type number */
    "afs",			/* gfs_name: file system name */
    Afs_init,			/* gfs_init: initialization routine */
    GFS_VERSION4 | GFS_VERSION42 | GFS_REMOTE,	/* gfs_flags */
    NULL			/* gfs_data */
};