2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "afs/param.h"
29 #include "rpc/types.h"
31 #include "netinet/in.h"
38 #include "afs/afs_osi.h"
39 #define RFTP_INTERNALS 1
40 #include "afs/volerrors.h"
44 #include "afs/exporter.h"
46 #include "afs/afs_chunkops.h"
47 #include "afs/afs_stats.h"
48 #include "afs/nfsclient.h"
50 #include "afs/prs_fs.h"
52 #include "afsincludes.h"
56 afs_gn_link(struct vnode
*vp
,
63 AFS_STATCNT(afs_gn_link
);
64 error
= afs_link(vp
, dp
, name
, cred
);
65 afs_Trace3(afs_iclSetp
, CM_TRACE_GNLINK
, ICL_TYPE_POINTER
, vp
,
66 ICL_TYPE_STRING
, name
, ICL_TYPE_LONG
, error
);
72 afs_gn_mkdir(struct vnode
*dp
,
82 AFS_STATCNT(afs_gn_mkdir
);
85 va
.va_mode
= (mode
& 07777) & ~get_umask();
86 error
= afs_mkdir(dp
, name
, &va
, &vp
, cred
);
90 afs_Trace4(afs_iclSetp
, CM_TRACE_GMKDIR
, ICL_TYPE_POINTER
, vp
,
91 ICL_TYPE_STRING
, name
, ICL_TYPE_LONG
, mode
, ICL_TYPE_LONG
,
98 afs_gn_mknod(struct vnode
*dp
,
109 AFS_STATCNT(afs_gn_mknod
);
111 va
.va_type
= IFTOVT(mode
);
112 va
.va_mode
= (mode
& 07777) & ~get_umask();
114 /**** I'm not sure if suser() should stay here since it makes no sense in AFS; however the documentation says that one "should be super-user unless making a FIFO file. Others systems such as SUN do this checking in the early stages of mknod (before the abstraction), so it's equivalently the same! *****/
115 if (va
.va_type
!= VFIFO
&& !suser((char *)&error
))
117 switch (va
.va_type
) {
119 error
= afs_mkdir(dp
, name
, &va
, &vp
, cred
);
129 error
= afs_create(VTOAFS(dp
), name
, &va
, NONEXCL
, mode
, (struct vcache
**)&vp
, cred
);
134 afs_Trace4(afs_iclSetp
, CM_TRACE_GMKNOD
, ICL_TYPE_POINTER
, (afs_int32
) vp
,
135 ICL_TYPE_STRING
, name
, ICL_TYPE_LONG
, mode
, ICL_TYPE_LONG
,
142 afs_gn_remove(struct vnode
*vp
, /* Ignored in AFS */
149 AFS_STATCNT(afs_gn_remove
);
150 error
= afs_remove(dp
, name
, cred
);
151 afs_Trace3(afs_iclSetp
, CM_TRACE_GREMOVE
, ICL_TYPE_POINTER
, dp
,
152 ICL_TYPE_STRING
, name
, ICL_TYPE_LONG
, error
);
158 afs_gn_rename(struct vnode
*vp
, /* Ignored in AFS */
161 struct vnode
*tp
, /* Ignored in AFS */
168 AFS_STATCNT(afs_gn_rename
);
169 error
= afs_rename(dp
, name
, tdp
, tname
, cred
);
170 afs_Trace4(afs_iclSetp
, CM_TRACE_GRENAME
, ICL_TYPE_POINTER
, dp
,
171 ICL_TYPE_STRING
, name
, ICL_TYPE_STRING
, tname
, ICL_TYPE_LONG
,
178 afs_gn_rmdir(struct vnode
*vp
, /* Ignored in AFS */
185 AFS_STATCNT(afs_gn_rmdir
);
186 error
= afs_rmdir(dp
, name
, cred
);
188 if (error
== 66 /* 4.3's ENOTEMPTY */ )
189 error
= EEXIST
; /* AIX returns EEXIST where 4.3 used ENOTEMPTY */
191 afs_Trace3(afs_iclSetp
, CM_TRACE_GRMDIR
, ICL_TYPE_POINTER
, dp
,
192 ICL_TYPE_STRING
, name
, ICL_TYPE_LONG
, error
);
198 afs_gn_lookup(struct vnode
*dp
,
201 int32long64_t Flags
, /* includes FOLLOW... */
202 struct vattr
*vattrp
,
208 AFS_STATCNT(afs_gn_lookup
);
209 error
= afs_lookup(dp
, name
, vpp
, cred
);
210 afs_Trace3(afs_iclSetp
, CM_TRACE_GLOOKUP
, ICL_TYPE_POINTER
, dp
,
211 ICL_TYPE_STRING
, name
, ICL_TYPE_LONG
, error
);
212 if (vattrp
!= NULL
&& error
== 0)
213 afs_gn_getattr(*vpp
, vattrp
, cred
);
219 afs_gn_fid(struct vnode
*vp
,
225 AFS_STATCNT(afs_gn_fid
);
226 error
= afs_fid(vp
, fidp
);
227 afs_Trace3(afs_iclSetp
, CM_TRACE_GFID
, ICL_TYPE_POINTER
, vp
,
228 ICL_TYPE_LONG
, (afs_int32
) fidp
, ICL_TYPE_LONG
, error
);
234 afs_gn_open(struct vnode
*vp
,
237 struct ucred
**vinfop
,
242 struct vcache
*tvp
= VTOAFS(vp
);
246 AFS_STATCNT(afs_gn_open
);
252 if ((flags
& FWRITE
) || (flags
& FTRUNC
))
255 while ((flags
& FNSHARE
) && tvp
->opens
) {
256 if (!(flags
& FDELAY
)) {
260 afs_osi_Sleep(&tvp
->opens
);
263 error
= afs_access(VTOAFS(vp
), modes
, cred
);
268 error
= afs_open((struct vcache
**) &vp
, flags
, cred
);
270 if (flags
& FTRUNC
) {
273 error
= afs_setattr(VTOAFS(vp
), &va
, cred
);
277 tvp
->f
.states
|= CNSHARE
;
280 *vinfop
= cred
; /* fp->f_vinfo is like fp->f_cred in suns */
282 /* an error occurred; we've told CM that the file
283 * is open, so close it now so that open and
284 * writer counts are correct. Ignore error code,
285 * as it is likely to fail (the setattr just did).
287 afs_close(vp
, flags
, cred
);
292 afs_Trace3(afs_iclSetp
, CM_TRACE_GOPEN
, ICL_TYPE_POINTER
, vp
,
293 ICL_TYPE_LONG
, flags
, ICL_TYPE_LONG
, error
);
299 afs_gn_create(struct vnode
*dp
,
304 struct ucred
**vinfop
, /* return ptr for fp->f_vinfo, used as fp->f_cred */
309 enum vcexcl exclusive
;
310 int error
, modes
= 0;
314 AFS_STATCNT(afs_gn_create
);
315 if ((flags
& (O_EXCL
| O_CREAT
)) == (O_EXCL
| O_CREAT
))
321 va
.va_mode
= (mode
& 07777) & ~get_umask();
326 if ((flags
& FWRITE
) || (flags
& FTRUNC
))
328 error
= afs_create(VTOAFS(dp
), name
, &va
, exclusive
, modes
, (struct vcache
**)vpp
, cred
);
332 /* 'cr_luid' is a flag (when it comes thru the NFS server it's set to
333 * RMTUSER_REQ) that determines if we should call afs_open(). We shouldn't
334 * call it when this NFS traffic since the close will never happen thus
335 * we'd never flush the files out to the server! Gross but the simplest
336 * solution we came out with */
337 if (cred
->cr_luid
!= RMTUSER_REQ
) {
338 while ((flags
& FNSHARE
) && VTOAFS(*vpp
)->opens
) {
339 if (!(flags
& FDELAY
))
341 afs_osi_Sleep(&VTOAFS(*vpp
)->opens
);
343 /* Since in the standard copen() for bsd vnode kernels they do an
344 * vop_open after the vop_create, we must do the open here since there
345 * are stuff in afs_open that we need. For example advance the
346 * execsOrWriters flag (else we'll be treated as the sun's "core"
348 *vinfop
= cred
; /* save user creds in fp->f_vinfo */
349 error
= afs_open((struct vcache
**)vpp
, flags
, cred
);
351 afs_Trace4(afs_iclSetp
, CM_TRACE_GCREATE
, ICL_TYPE_POINTER
, dp
,
352 ICL_TYPE_STRING
, name
, ICL_TYPE_LONG
, mode
, ICL_TYPE_LONG
,
359 afs_gn_hold(struct vnode
*vp
)
361 AFS_STATCNT(afs_gn_hold
);
369 afs_gn_rele(struct vnode
*vp
)
371 struct vcache
*vcp
= VTOAFS(vp
);
374 AFS_STATCNT(afs_gn_rele
);
375 if (vp
->v_count
== 0)
376 osi_Panic("afs_rele: zero v_count");
377 if (--(vp
->v_count
) == 0) {
378 if (vcp
->f
.states
& CPageHog
) {
380 vcp
->f
.states
&= ~CPageHog
;
382 error
= afs_inactive(vp
, 0);
389 afs_gn_close(struct vnode
*vp
,
391 caddr_t vinfo
, /* Ignored in AFS */
395 struct vcache
*tvp
= VTOAFS(vp
);
398 AFS_STATCNT(afs_gn_close
);
400 if (flags
& FNSHARE
) {
401 tvp
->f
.states
&= ~CNSHARE
;
402 afs_osi_Wakeup(&tvp
->opens
);
405 error
= afs_close(vp
, flags
, cred
);
406 afs_Trace3(afs_iclSetp
, CM_TRACE_GCLOSE
, ICL_TYPE_POINTER
, (afs_int32
) vp
,
407 ICL_TYPE_LONG
, flags
, ICL_TYPE_LONG
, error
);
413 afs_gn_map(struct vnode
*vp
,
420 struct vcache
*vcp
= VTOAFS(vp
);
421 struct vrequest treq
;
425 afs_int32 flag
= Flag
;
427 AFS_STATCNT(afs_gn_map
);
429 if (error
= afs_InitReq(&treq
, cred
))
431 error
= afs_VerifyVCache(vcp
, &treq
);
433 return afs_CheckCode(error
, &treq
, 49);
435 osi_FlushPages(vcp
, cred
); /* XXX ensure old pages are gone XXX */
436 ObtainWriteLock(&vcp
->lock
, 401);
437 vcp
->f
.states
|= CMAPPED
; /* flag cleared at afs_inactive */
439 * We map the segment into our address space using the handle returned by vm_create.
442 afs_uint32 tlen
= vcp
->f
.m
.Length
;
443 #ifdef AFS_64BIT_CLIENT
444 if (vcp
->f
.m
.Length
> afs_vmMappingEnd
)
445 tlen
= afs_vmMappingEnd
;
447 /* Consider V_INTRSEG too for interrupts */
449 vms_create(&vcp
->segid
, V_CLIENT
, (dev_t
) vcp
->v
.v_gnode
, tlen
, 0, 0)) {
450 ReleaseWriteLock(&vcp
->lock
);
453 #ifdef AFS_64BIT_KERNEL
454 vcp
->vmh
= vm_handle(vcp
->segid
, (int32long64_t
) 0);
456 vcp
->vmh
= SRVAL(vcp
->segid
, 0, 0);
459 vcp
->v
.v_gnode
->gn_seg
= vcp
->segid
; /* XXX Important XXX */
460 if (flag
& SHM_RDONLY
) {
461 vp
->v_gnode
->gn_mrdcnt
++;
463 vp
->v_gnode
->gn_mwrcnt
++;
466 * We keep the caller's credentials since an async daemon will handle the
467 * request at some point. We assume that the same credentials will be used.
469 if (!vcp
->credp
|| (vcp
->credp
!= cred
)) {
472 struct ucred
*crp
= vcp
->credp
;
478 ReleaseWriteLock(&vcp
->lock
);
480 afs_Trace4(afs_iclSetp
, CM_TRACE_GMAP
, ICL_TYPE_POINTER
, vp
,
481 ICL_TYPE_LONG
, addr
, ICL_TYPE_LONG
, len
, ICL_TYPE_LONG
, off
);
487 afs_gn_unmap(struct vnode
*vp
,
491 struct vcache
*vcp
= VTOAFS(vp
);
492 AFS_STATCNT(afs_gn_unmap
);
493 ObtainWriteLock(&vcp
->lock
, 402);
494 if (flag
& SHM_RDONLY
) {
495 vp
->v_gnode
->gn_mrdcnt
--;
496 if (vp
->v_gnode
->gn_mrdcnt
<= 0)
497 vp
->v_gnode
->gn_mrdcnt
= 0;
499 vp
->v_gnode
->gn_mwrcnt
--;
500 if (vp
->v_gnode
->gn_mwrcnt
<= 0)
501 vp
->v_gnode
->gn_mwrcnt
= 0;
503 ReleaseWriteLock(&vcp
->lock
);
511 afs_gn_access(struct vnode
*vp
,
521 AFS_STATCNT(afs_gn_access
);
527 error
= afs_access(VTOAFS(vp
), mode
, cred
);
529 /* Additional testing */
530 if (who
== ACC_OTHERS
|| who
== ACC_ANY
) {
531 error
= afs_getattr(VTOAFS(vp
), &vattr
, cred
);
533 if (who
== ACC_ANY
) {
534 if (((vattr
.va_mode
>> 6) & mode
) == mode
) {
539 if (((vattr
.va_mode
>> 3) & mode
) == mode
)
544 } else if (who
== ACC_ALL
) {
545 error
= afs_getattr(VTOAFS(vp
), &vattr
, cred
);
547 if ((!((vattr
.va_mode
>> 6) & mode
))
548 || (!((vattr
.va_mode
>> 3) & mode
))
549 || (!(vattr
.va_mode
& mode
)))
558 afs_Trace3(afs_iclSetp
, CM_TRACE_GACCESS
, ICL_TYPE_POINTER
, vp
,
559 ICL_TYPE_LONG
, mode
, ICL_TYPE_LONG
, error
);
565 afs_gn_getattr(struct vnode
*vp
,
566 struct vattr
*vattrp
,
571 AFS_STATCNT(afs_gn_getattr
);
572 error
= afs_getattr(VTOAFS(vp
), vattrp
, cred
);
573 afs_Trace2(afs_iclSetp
, CM_TRACE_GGETATTR
, ICL_TYPE_POINTER
, vp
,
574 ICL_TYPE_LONG
, error
);
580 afs_gn_setattr(struct vnode
*vp
,
590 AFS_STATCNT(afs_gn_setattr
);
598 if ((arg1
& T_OWNER_AS_IS
) == 0)
600 if ((arg1
& T_GROUP_AS_IS
) == 0)
605 error
= afs_access(vp
, VWRITE
, cred
);
609 if (arg1
& T_SETTIME
) {
610 va
.va_atime
.tv_sec
= time
;
611 va
.va_mtime
.tv_sec
= time
;
613 va
.va_atime
= *(struct timestruc_t
*)arg2
;
614 va
.va_mtime
= *(struct timestruc_t
*)arg3
;
622 error
= afs_setattr(VTOAFS(vp
), &va
, cred
);
624 afs_Trace2(afs_iclSetp
, CM_TRACE_GSETATTR
, ICL_TYPE_POINTER
, vp
,
625 ICL_TYPE_LONG
, error
);
630 char zero_buffer
[PAGESIZE
];
632 afs_gn_fclear(struct vnode
*vp
,
639 int i
, len
, error
= 0;
642 static int fclear_init
= 0;
643 struct vcache
*avc
= VTOAFS(vp
);
645 memset(&uio
, 0, sizeof(uio
));
646 memset(&iov
, 0, sizeof(iov
));
648 AFS_STATCNT(afs_gn_fclear
);
650 memset(zero_buffer
, 0, PAGESIZE
);
654 * Don't clear past ulimit
656 if (offset
+ length
> get_ulimit())
659 /* Flush all pages first */
662 vm_flushp(avc
->segid
, 0, MAXFSIZE
/ PAGESIZE
- 1);
663 vms_iowait(avc
->segid
);
666 uio
.afsio_offset
= offset
;
667 for (i
= offset
; i
< offset
+ length
; i
= uio
.afsio_offset
) {
668 len
= offset
+ length
- i
;
669 iov
.iov_len
= (len
> PAGESIZE
) ? PAGESIZE
: len
;
670 iov
.iov_base
= zero_buffer
;
671 uio
.afsio_iov
= &iov
;
672 uio
.afsio_iovcnt
= 1;
673 uio
.afsio_seg
= AFS_UIOSYS
;
674 uio
.afsio_resid
= iov
.iov_len
;
675 if (error
= afs_rdwr(VTOAFS(vp
), &uio
, UIO_WRITE
, 0, cred
))
678 afs_Trace4(afs_iclSetp
, CM_TRACE_GFCLEAR
, ICL_TYPE_POINTER
, vp
,
679 ICL_TYPE_LONG
, offset
, ICL_TYPE_LONG
, length
, ICL_TYPE_LONG
,
686 afs_gn_fsync(struct vnode
*vp
,
687 int32long64_t flags
, /* Not used by AFS */
688 int32long64_t vinfo
, /* Not used by AFS */
693 AFS_STATCNT(afs_gn_fsync
);
694 error
= afs_fsync(vp
, cred
);
695 afs_Trace3(afs_iclSetp
, CM_TRACE_GFSYNC
, ICL_TYPE_POINTER
, vp
,
696 ICL_TYPE_LONG
, flags
, ICL_TYPE_LONG
, error
);
702 afs_gn_ftrunc(struct vnode
*vp
,
711 AFS_STATCNT(afs_gn_ftrunc
);
714 error
= afs_setattr(VTOAFS(vp
), &va
, cred
);
715 afs_Trace4(afs_iclSetp
, CM_TRACE_GFTRUNC
, ICL_TYPE_POINTER
, vp
,
716 ICL_TYPE_LONG
, flags
, ICL_TYPE_OFFSET
,
717 ICL_HANDLE_OFFSET(length
), ICL_TYPE_LONG
, error
);
721 /* Min size of a file which is dumping core before we declare it a page hog. */
722 #define MIN_PAGE_HOG_SIZE 8388608
725 afs_gn_rdwr(struct vnode
*vp
,
729 ext_t ext
, /* Ignored in AFS */
730 caddr_t vinfo
, /* Ignored in AFS */
731 struct vattr
*vattrp
,
734 struct vcache
*vcp
= VTOAFS(vp
);
735 struct vrequest treq
;
740 AFS_STATCNT(afs_gn_rdwr
);
743 if (op
== UIO_WRITE
) {
744 afs_Trace2(afs_iclSetp
, CM_TRACE_GRDWR1
, ICL_TYPE_POINTER
, vp
,
745 ICL_TYPE_LONG
, vcp
->vc_error
);
746 return vcp
->vc_error
;
751 ObtainSharedLock(&vcp
->lock
, 507);
753 * We keep the caller's credentials since an async daemon will handle the
754 * request at some point. We assume that the same credentials will be used.
755 * If this is being called from an NFS server thread, then dupe the
756 * cred and only use that copy in calls and for the stach.
758 if (!vcp
->credp
|| (vcp
->credp
!= cred
)) {
759 #ifdef AFS_AIX_IAUTH_ENV
760 if (AFS_NFSXLATORREQ(cred
)) {
761 /* Must be able to use cred later, so dupe it so that nfs server
762 * doesn't overwrite it's contents.
768 crhold(cred
); /* Bump refcount for reference in vcache */
772 UpgradeSToWLock(&vcp
->lock
, 508);
775 ConvertWToSLock(&vcp
->lock
);
780 ReleaseSharedLock(&vcp
->lock
);
783 * XXX Is the following really required?? XXX
785 if (error
= afs_InitReq(&treq
, cred
))
787 if (error
= afs_VerifyVCache(vcp
, &treq
))
788 return afs_CheckCode(error
, &treq
, 50);
789 osi_FlushPages(vcp
, cred
); /* Flush old pages */
791 if (AFS_NFSXLATORREQ(cred
)) {
794 if (op
== UIO_READ
) {
796 (vcp
, PRSFS_READ
, &treq
,
797 CHECK_MODE_BITS
| CMB_ALLOW_EXEC_AS_READ
)) {
806 * We have to bump the open/exwriters field here courtesy of the nfs xlator
807 * because there're no open/close nfs rpcs to call our afs_open/close.
808 * We do a similar thing on the afs_read/write interface.
810 if (op
== UIO_WRITE
) {
811 #ifdef AFS_64BIT_CLIENT
812 if (ubuf
->afsio_offset
< afs_vmMappingEnd
) {
813 #endif /* AFS_64BIT_CLIENT */
814 ObtainWriteLock(&vcp
->lock
, 240);
815 vcp
->f
.states
|= CDirty
; /* Set the dirty bit */
817 ReleaseWriteLock(&vcp
->lock
);
818 #ifdef AFS_64BIT_CLIENT
820 #endif /* AFS_64BIT_CLIENT */
823 error
= afs_vm_rdwr(vp
, ubuf
, op
, flags
, cred
);
825 if (op
== UIO_WRITE
) {
826 #ifdef AFS_64BIT_CLIENT
827 if (ubuf
->afsio_offset
< afs_vmMappingEnd
) {
828 #endif /* AFS_64BIT_CLIENT */
829 ObtainWriteLock(&vcp
->lock
, 241);
830 afs_FakeClose(vcp
, cred
); /* XXXX For nfs trans and cores XXXX */
831 ReleaseWriteLock(&vcp
->lock
);
832 #ifdef AFS_64BIT_CLIENT
834 #endif /* AFS_64BIT_CLIENT */
836 if (vattrp
!= NULL
&& error
== 0)
837 afs_gn_getattr(vp
, vattrp
, cred
);
839 afs_Trace4(afs_iclSetp
, CM_TRACE_GRDWR
, ICL_TYPE_POINTER
, vp
,
840 ICL_TYPE_LONG
, flags
, ICL_TYPE_LONG
, op
, ICL_TYPE_LONG
, error
);
847 #define AFS_MAX_VM_CHUNKS 10
849 afs_vm_rdwr(struct vnode
*vp
,
858 afs_size_t fileSize
, xfrOffset
, offset
, old_offset
, xfrSize
;
860 #ifdef AFS_64BIT_CLIENT
861 afs_size_t finalOffset
;
864 afs_size_t add2resid
= 0;
865 #endif /* AFS_64BIT_CLIENT */
866 struct vcache
*vcp
= VTOAFS(vp
);
868 afs_size_t start_offset
;
869 afs_int32 save_resid
= uiop
->afsio_resid
;
870 int first_page
, last_page
, pages
;
873 struct vrequest treq
;
875 if (code
= afs_InitReq(&treq
, credp
))
878 /* special case easy transfer; apparently a lot are done */
879 if ((xfrSize
= uiop
->afsio_resid
) == 0)
882 ObtainReadLock(&vcp
->lock
);
883 fileSize
= vcp
->f
.m
.Length
;
884 if (rw
== UIO_WRITE
&& (ioflag
& IO_APPEND
)) { /* handle IO_APPEND mode */
885 uiop
->afsio_offset
= fileSize
;
887 /* compute xfrOffset now, and do some checks */
888 xfrOffset
= uiop
->afsio_offset
;
889 if (xfrOffset
< 0 || xfrOffset
+ xfrSize
< 0) {
891 ReleaseReadLock(&vcp
->lock
);
894 #ifndef AFS_64BIT_CLIENT
895 /* check for "file too big" error, which should really be done above us */
896 if (rw
== UIO_WRITE
&& xfrSize
+ fileSize
> get_ulimit()) {
898 ReleaseReadLock(&vcp
->lock
);
901 #endif /* AFS_64BIT_CLIENT */
903 #ifdef AFS_64BIT_CLIENT
904 if (xfrOffset
+ xfrSize
> afs_vmMappingEnd
) {
905 if (rw
== UIO_READ
) {
906 /* don't read past EOF */
907 if (xfrSize
+xfrOffset
> fileSize
) {
908 add2resid
= xfrSize
+ xfrOffset
- fileSize
;
909 xfrSize
= fileSize
- xfrOffset
;
911 ReleaseReadLock(&vcp
->lock
);
915 afsio_trim(uiop
, txfrSize
);
918 if (xfrOffset
< afs_vmMappingEnd
) {
919 /* special case of a buffer crossing the VM mapping line */
921 struct iovec tvec
[16]; /* Should have access to #define */
924 memset(&tuio
, 0, sizeof(tuio
));
925 memset(&tvec
, 0, sizeof(tvec
));
928 finalOffset
= xfrOffset
+ xfrSize
;
929 tsize
= (afs_size_t
) (xfrOffset
+ xfrSize
- afs_vmMappingEnd
);
931 afsio_copy(uiop
, &tuio
, tvec
);
932 afsio_skip(&tuio
, txfrSize
- tsize
);
933 afsio_trim(&tuio
, tsize
);
934 tuio
.afsio_offset
= afs_vmMappingEnd
;
935 ReleaseReadLock(&vcp
->lock
);
936 ObtainWriteLock(&vcp
->lock
, 243);
937 afs_FakeClose(vcp
, credp
); /* XXXX For nfs trans and cores XXXX */
938 ReleaseWriteLock(&vcp
->lock
);
939 code
= afs_direct_rdwr(vp
, &tuio
, rw
, ioflag
, credp
);
940 ObtainWriteLock(&vcp
->lock
, 244);
941 afs_FakeOpen(vcp
); /* XXXX For nfs trans and cores XXXX */
942 ReleaseWriteLock(&vcp
->lock
);
945 ObtainReadLock(&vcp
->lock
);
946 xfrSize
= afs_vmMappingEnd
- xfrOffset
;
948 afsio_trim(uiop
, txfrSize
);
950 ReleaseReadLock(&vcp
->lock
);
951 code
= afs_direct_rdwr(vp
, uiop
, rw
, ioflag
, credp
);
952 uiop
->uio_resid
+= add2resid
;
956 #endif /* AFS_64BIT_CLIENT */
959 afs_uint32 tlen
= vcp
->f
.m
.Length
;
960 #ifdef AFS_64BIT_CLIENT
961 if (vcp
->f
.m
.Length
> afs_vmMappingEnd
)
962 tlen
= afs_vmMappingEnd
;
964 /* Consider V_INTRSEG too for interrupts */
966 vms_create(&vcp
->segid
, V_CLIENT
, (dev_t
) vcp
->v
.v_gnode
, tlen
, 0, 0)) {
967 ReleaseReadLock(&vcp
->lock
);
970 #ifdef AFS_64BIT_KERNEL
971 vcp
->vmh
= vm_handle(vcp
->segid
, (int32long64_t
) 0);
973 vcp
->vmh
= SRVAL(vcp
->segid
, 0, 0);
976 vcp
->v
.v_gnode
->gn_seg
= vcp
->segid
;
977 if (rw
== UIO_READ
) {
978 ReleaseReadLock(&vcp
->lock
);
979 /* don't read past EOF */
980 if (xfrSize
+ xfrOffset
> fileSize
)
981 xfrSize
= fileSize
- xfrOffset
;
984 #ifdef AFS_64BIT_CLIENT
986 uiop
->afsio_offset
= xfrOffset
;
987 afs_Trace3(afs_iclSetp
, CM_TRACE_VMWRITE
, ICL_TYPE_POINTER
, vcp
,
988 ICL_TYPE_OFFSET
, ICL_HANDLE_OFFSET(xfrOffset
),
989 ICL_TYPE_OFFSET
, ICL_HANDLE_OFFSET(xfrSize
));
992 code
= vm_move(vcp
->segid
, toffset
, txfrSize
, rw
, uiop
);
993 #else /* AFS_64BIT_CLIENT */
995 code
= vm_move(vcp
->segid
, xfrOffset
, xfrSize
, rw
, uiop
);
996 #endif /* AFS_64BIT_CLIENT */
999 * If at a chunk boundary and staying within chunk,
1000 * start prefetch of next chunk.
1002 if (counter
== 0 || AFS_CHUNKOFFSET(xfrOffset
) == 0
1003 && xfrSize
<= AFS_CHUNKSIZE(xfrOffset
)) {
1004 ObtainWriteLock(&vcp
->lock
, 407);
1005 tdc
= afs_FindDCache(vcp
, xfrOffset
);
1007 if (!(tdc
->mflags
& DFNextStarted
))
1008 afs_PrefetchChunk(vcp
, tdc
, credp
, &treq
);
1011 ReleaseWriteLock(&vcp
->lock
);
1013 #ifdef AFS_64BIT_CLIENT
1015 uiop
->afsio_offset
= finalOffset
;
1017 uiop
->uio_resid
+= add2resid
;
1018 #endif /* AFS_64BIT_CLIENT */
1023 start_offset
= uiop
->afsio_offset
;
1024 afs_Trace3(afs_iclSetp
, CM_TRACE_VMWRITE
, ICL_TYPE_POINTER
, vcp
,
1025 ICL_TYPE_OFFSET
, ICL_HANDLE_OFFSET(start_offset
),
1026 ICL_TYPE_OFFSET
, ICL_HANDLE_OFFSET(xfrSize
));
1027 ReleaseReadLock(&vcp
->lock
);
1028 ObtainWriteLock(&vcp
->lock
, 400);
1029 vcp
->f
.m
.Date
= osi_Time(); /* Set file date (for ranlib) */
1031 /* un-protect last page. */
1032 last_page
= vcp
->f
.m
.Length
/ PAGESIZE
;
1033 #ifdef AFS_64BIT_CLIENT
1034 if (vcp
->f
.m
.Length
> afs_vmMappingEnd
)
1035 last_page
= afs_vmMappingEnd
/ PAGESIZE
;
1037 vm_protectp(vcp
->segid
, last_page
, 1, FILEKEY
);
1038 if (xfrSize
+ xfrOffset
> fileSize
) {
1039 vcp
->f
.m
.Length
= xfrSize
+ xfrOffset
;
1041 if ((!(vcp
->f
.states
& CPageHog
)) && (xfrSize
>= MIN_PAGE_HOG_SIZE
)) {
1043 vcp
->f
.states
|= CPageHog
;
1045 ReleaseWriteLock(&vcp
->lock
);
1047 /* If the write will fit into a single chunk we'll write all of it
1048 * at once. Otherwise, we'll write one chunk at a time, flushing
1049 * some of it to disk.
1053 /* Only create a page to avoid excess VM access if we're writing a
1054 * small file which is either new or completely overwrites the
1057 if ((xfrOffset
== 0) && (xfrSize
< PAGESIZE
) && (xfrSize
>= fileSize
)
1058 && (vcp
->v
.v_gnode
->gn_mwrcnt
== 0)
1059 && (vcp
->v
.v_gnode
->gn_mrdcnt
== 0)) {
1060 (void)vm_makep(vcp
->segid
, 0);
1063 while (xfrSize
> 0) {
1064 offset
= AFS_CHUNKBASE(xfrOffset
);
1067 if (AFS_CHUNKSIZE(xfrOffset
) <= len
)
1069 (afs_size_t
) AFS_CHUNKSIZE(xfrOffset
) - (xfrOffset
- offset
);
1071 if (len
== xfrSize
) {
1072 /* All data goes to this one chunk. */
1074 old_offset
= uiop
->afsio_offset
;
1075 #ifdef AFS_64BIT_CLIENT
1076 uiop
->afsio_offset
= xfrOffset
;
1077 toffset
= xfrOffset
;
1079 code
= vm_move(vcp
->segid
, toffset
, txfrSize
, rw
, uiop
);
1080 #else /* AFS_64BIT_CLIENT */
1081 code
= vm_move(vcp
->segid
, xfrOffset
, xfrSize
, rw
, uiop
);
1082 #endif /* AFS_64BIT_CLIENT */
1090 /* Write just one chunk's worth of data. */
1092 struct iovec tvec
[16]; /* Should have access to #define */
1094 memset(&tuio
, 0, sizeof(tuio
));
1095 memset(&tvec
, 0, sizeof(tvec
));
1097 /* Purge dirty chunks of file if there are too many dirty chunks.
1098 * Inside the write loop, we only do this at a chunk boundary.
1099 * Clean up partial chunk if necessary at end of loop.
1101 if (counter
> 0 && code
== 0 && xfrOffset
== offset
) {
1102 ObtainWriteLock(&vcp
->lock
, 403);
1103 if (xfrOffset
> vcp
->f
.m
.Length
)
1104 vcp
->f
.m
.Length
= xfrOffset
;
1105 code
= afs_DoPartialWrite(vcp
, &treq
);
1106 vcp
->f
.states
|= CDirty
;
1107 ReleaseWriteLock(&vcp
->lock
);
1114 afsio_copy(uiop
, &tuio
, tvec
);
1115 afsio_trim(&tuio
, len
);
1116 tuio
.afsio_offset
= xfrOffset
;
1119 old_offset
= uiop
->afsio_offset
;
1120 #ifdef AFS_64BIT_CLIENT
1121 toffset
= xfrOffset
;
1122 code
= vm_move(vcp
->segid
, toffset
, len
, rw
, &tuio
);
1123 #else /* AFS_64BIT_CLIENT */
1124 code
= vm_move(vcp
->segid
, xfrOffset
, len
, rw
, &tuio
);
1125 #endif /* AFS_64BIT_CLIENT */
1127 len
-= tuio
.afsio_resid
;
1128 if (code
|| (len
<= 0)) {
1129 code
= code
? code
: EINVAL
;
1132 afsio_skip(uiop
, len
);
1137 first_page
= (afs_size_t
) old_offset
>> PGSHIFT
;
1139 1 + (((afs_size_t
) old_offset
+ (len
- 1)) >> PGSHIFT
) -
1141 afs_Trace3(afs_iclSetp
, CM_TRACE_VMWRITE2
, ICL_TYPE_POINTER
, vcp
,
1142 ICL_TYPE_INT32
, first_page
, ICL_TYPE_INT32
, pages
);
1144 code
= vm_writep(vcp
->segid
, first_page
, pages
);
1149 if (++count
> AFS_MAX_VM_CHUNKS
) {
1151 code
= vms_iowait(vcp
->segid
);
1153 /* cache device failure? */
1164 code
= vms_iowait(vcp
->segid
);
1167 /* cache device failure? */
1172 ObtainWriteLock(&vcp
->lock
, 242);
1173 if (code
== 0 && (vcp
->f
.states
& CDirty
)) {
1174 code
= afs_DoPartialWrite(vcp
, &treq
);
1176 vm_protectp(vcp
->segid
, last_page
, 1, RDONLY
);
1177 ReleaseWriteLock(&vcp
->lock
);
1179 /* If requested, fsync the file after every write */
1181 afs_fsync(vp
, credp
);
1183 ObtainReadLock(&vcp
->lock
);
1184 if (vcp
->vc_error
) {
1185 /* Pretend we didn't write anything. We need to get the error back to
1186 * the user. If we don't it's possible for a quota error for this
1187 * write to succeed and the file to be closed without the user ever
1188 * having seen the store error. And AIX syscall clears the error if
1189 * anything was written.
1191 code
= vcp
->vc_error
;
1192 if (code
== EDQUOT
|| code
== ENOSPC
)
1193 uiop
->afsio_resid
= save_resid
;
1195 #ifdef AFS_64BIT_CLIENT
1197 uiop
->afsio_offset
= finalOffset
;
1199 #endif /* AFS_64BIT_CLIENT */
1200 ReleaseReadLock(&vcp
->lock
);
1203 afs_Trace2(afs_iclSetp
, CM_TRACE_VMWRITE3
, ICL_TYPE_POINTER
, vcp
,
1204 ICL_TYPE_INT32
, code
);
1210 afs_direct_rdwr(struct vnode
*vp
,
1214 struct ucred
*credp
)
1217 afs_size_t fileSize
, xfrOffset
, offset
, old_offset
, xfrSize
;
1218 struct vcache
*vcp
= VTOAFS(vp
);
1219 afs_int32 save_resid
= uiop
->afsio_resid
;
1220 struct vrequest treq
;
1222 if (code
= afs_InitReq(&treq
, credp
))
1225 /* special case easy transfer; apparently a lot are done */
1226 if ((xfrSize
= uiop
->afsio_resid
) == 0)
1229 ObtainReadLock(&vcp
->lock
);
1230 fileSize
= vcp
->f
.m
.Length
;
1231 if (rw
== UIO_WRITE
&& (ioflag
& IO_APPEND
)) { /* handle IO_APPEND mode */
1232 uiop
->afsio_offset
= fileSize
;
1234 /* compute xfrOffset now, and do some checks */
1235 xfrOffset
= uiop
->afsio_offset
;
1236 if (xfrOffset
< 0 || xfrOffset
+ xfrSize
< 0) {
1238 ReleaseReadLock(&vcp
->lock
);
1242 /* check for "file too big" error, which should really be done above us */
1244 if (rw
== UIO_WRITE
&& xfrSize
+ fileSize
> get_ulimit()) {
1246 ReleaseReadLock(&vcp
->lock
);
1250 ReleaseReadLock(&vcp
->lock
);
1251 if (rw
== UIO_WRITE
) {
1252 ObtainWriteLock(&vcp
->lock
, 400);
1253 vcp
->f
.m
.Date
= osi_Time(); /* Set file date (for ranlib) */
1255 if (xfrSize
+ xfrOffset
> fileSize
)
1256 vcp
->f
.m
.Length
= xfrSize
+ xfrOffset
;
1257 ReleaseWriteLock(&vcp
->lock
);
1259 afs_Trace3(afs_iclSetp
, CM_TRACE_DIRECTRDWR
, ICL_TYPE_POINTER
, vp
,
1260 ICL_TYPE_OFFSET
, ICL_HANDLE_OFFSET(uiop
->afsio_offset
),
1261 ICL_TYPE_LONG
, uiop
->afsio_resid
);
1262 code
= afs_rdwr(VTOAFS(vp
), uiop
, rw
, ioflag
, credp
);
1264 uiop
->afsio_resid
= save_resid
;
1266 uiop
->afsio_offset
= xfrOffset
+ xfrSize
;
1267 if (uiop
->afsio_resid
> 0) {
1268 /* should zero here the remaining buffer */
1269 uiop
->afsio_resid
= 0;
1271 /* Purge dirty chunks of file if there are too many dirty chunks.
1272 * Inside the write loop, we only do this at a chunk boundary.
1273 * Clean up partial chunk if necessary at end of loop.
1275 if (AFS_CHUNKBASE(uiop
->afsio_offset
) != AFS_CHUNKBASE(xfrOffset
)) {
1276 ObtainWriteLock(&vcp
->lock
, 402);
1277 code
= afs_DoPartialWrite(vcp
, &treq
);
1278 vcp
->f
.states
|= CDirty
;
1279 ReleaseWriteLock(&vcp
->lock
);
1289 lock_normalize(struct vnode
*vp
,
1290 struct flock
*lckdat
,
1297 switch (lckdat
->l_whence
) {
1301 lckdat
->l_start
+= (off_t
) offset
;
1304 code
= afs_getattr(VTOAFS(vp
), &vattr
, cred
);
1307 lckdat
->l_start
+= (off_t
) vattr
.va_size
;
1312 lckdat
->l_whence
= 0;
1319 afs_gn_lockctl(struct vnode
*vp
,
1321 struct eflock
*lckdat
,
1323 int (*ignored_fcn
) (),
1324 #ifdef AFS_AIX52_ENV /* Changed in AIX 5.2 and up */
1326 #else /* AFS_AIX52_ENV */
1327 ulong32int64_t
* ignored_id
,
1328 #endif /* AFS_AIX52_ENV */
1331 int error
, ncmd
= 0;
1333 struct vattr
*attrs
;
1335 AFS_STATCNT(afs_gn_lockctl
);
1336 /* Convert from AIX's cmd to standard lockctl lock types... */
1339 else if (cmd
& SETFLCK
) {
1344 flkd
.l_type
= lckdat
->l_type
;
1345 flkd
.l_whence
= lckdat
->l_whence
;
1346 flkd
.l_start
= lckdat
->l_start
;
1347 flkd
.l_len
= lckdat
->l_len
;
1348 flkd
.l_pid
= lckdat
->l_pid
;
1349 flkd
.l_sysid
= lckdat
->l_sysid
;
1351 if (flkd
.l_start
!= lckdat
->l_start
|| flkd
.l_len
!= lckdat
->l_len
)
1353 if (error
= lock_normalize(vp
, &flkd
, offset
, cred
))
1355 error
= afs_lockctl(vp
, &flkd
, ncmd
, cred
);
1356 lckdat
->l_type
= flkd
.l_type
;
1357 lckdat
->l_whence
= flkd
.l_whence
;
1358 lckdat
->l_start
= flkd
.l_start
;
1359 lckdat
->l_len
= flkd
.l_len
;
1360 lckdat
->l_pid
= flkd
.l_pid
;
1361 lckdat
->l_sysid
= flkd
.l_sysid
;
1362 afs_Trace3(afs_iclSetp
, CM_TRACE_GLOCKCTL
, ICL_TYPE_POINTER
, vp
,
1363 ICL_TYPE_LONG
, ncmd
, ICL_TYPE_LONG
, error
);
1368 /* NOTE: In the nfs glue routine (nfs_gn2sun.c) the order was wrong (vp, flags, cmd, arg, ext); was that another typo? */
1370 afs_gn_ioctl(struct vnode
*vp
,
1373 size_t flags
, /* Ignored in AFS */
1374 ext_t ext
, /* Ignored in AFS */
1375 struct ucred
*crp
) /* Ignored in AFS */
1380 AFS_STATCNT(afs_gn_ioctl
);
1381 /* This seems to be a perfect fit for our ioctl redirection (afs_xioctl hack); thus the ioctl(2) entry in sysent.c is unaffected in the aix/afs port. */
1382 error
= afs_ioctl(vp
, cmd
, arg
);
1383 afs_Trace3(afs_iclSetp
, CM_TRACE_GIOCTL
, ICL_TYPE_POINTER
, vp
,
1384 ICL_TYPE_LONG
, cmd
, ICL_TYPE_LONG
, error
);
1390 afs_gn_readlink(struct vnode
*vp
,
1396 AFS_STATCNT(afs_gn_readlink
);
1397 error
= afs_readlink(vp
, uiop
, cred
);
1398 afs_Trace2(afs_iclSetp
, CM_TRACE_GREADLINK
, ICL_TYPE_POINTER
, vp
,
1399 ICL_TYPE_LONG
, error
);
1405 afs_gn_select(struct vnode
*vp
,
1406 int32long64_t correl
,
1413 AFS_STATCNT(afs_gn_select
);
1414 /* NO SUPPORT for this in afs YET! */
1415 return (EOPNOTSUPP
);
1420 afs_gn_symlink(struct vnode
*vp
,
1428 AFS_STATCNT(afs_gn_symlink
);
1431 error
= afs_symlink(vp
, link
, &va
, target
, NULL
, cred
);
1432 afs_Trace4(afs_iclSetp
, CM_TRACE_GSYMLINK
, ICL_TYPE_POINTER
, vp
,
1433 ICL_TYPE_STRING
, link
, ICL_TYPE_STRING
, target
, ICL_TYPE_LONG
,
1440 afs_gn_readdir(struct vnode
*vp
,
1446 AFS_STATCNT(afs_gn_readdir
);
1447 error
= afs_readdir(vp
, uiop
, cred
);
1448 afs_Trace2(afs_iclSetp
, CM_TRACE_GREADDIR
, ICL_TYPE_POINTER
, vp
,
1449 ICL_TYPE_LONG
, error
);
1454 extern Simple_lock afs_asyncbuf_lock
;
1455 extern struct buf
*afs_asyncbuf
;
1456 extern int afs_asyncbuf_cv
;
1459 * Buffers are ranked by age. A buffer's age is the value of afs_biotime
1460 * when the buffer is processed by afs_gn_strategy. afs_biotime is
1461 * incremented for each buffer. A buffer's age is kept in its av_back field.
1462 * The age ranking is used by the daemons, which favor older buffers.
1464 afs_int32 afs_biotime
= 0;
1466 /* This function is called with a list of buffers, threaded through
1467 * the av_forw field. Our goal is to copy the list of buffers into the
1468 * afs_asyncbuf list, sorting buffers into sublists linked by the b_work field.
1469 * Within buffers within the same work group, the guy with the lowest address
1470 * has to be located at the head of the queue; his b_bcount field will also
1471 * be increased to cover all of the buffers in the b_work queue.
1473 #define AIX_VM_BLKSIZE 8192
1474 /* Note: This function seems to be called as ddstrategy entry point, ie
1475 * has one argument. However, it also needs to be present as
1476 * vn_strategy entry point which has three arguments, but it seems to never
1477 * be called in that capacity (it would fail horribly due to the argument
1478 * mismatch). I'm confused, but it obviously has to be this way, maybe
1479 * some IBM people can shed som light on this
1482 afs_gn_strategy(struct buf
*abp
)
1484 struct buf
**lbp
, *tbp
;
1486 struct buf
*nbp
, *qbp
, *qnbp
, *firstComparable
;
1490 #define EFS_COMPARABLE(x,y) ((x)->b_vp == (y)->b_vp \
1491 && (x)->b_xmemd.subspace_id == (y)->b_xmemd.subspace_id \
1492 && (x)->b_flags == (y)->b_flags \
1493 && !((x)->b_flags & B_PFPROT) \
1494 && !((y)->b_flags & B_PFPROT))
1496 oldPriority
= disable_lock(INTMAX
, &afs_asyncbuf_lock
);
1497 for (tbp
= abp
; tbp
; tbp
= nbp
) {
1498 nbp
= tbp
->av_forw
; /* remember for later */
1500 tbp
->av_back
= (struct buf
*)afs_biotime
++;
1502 /* first insert the buffer into the afs_async queue. Insert buffer
1503 * sorted within its disk position within a set of comparable buffers.
1504 * Ensure that all comparable buffers are grouped contiguously.
1505 * Later on, we'll merge adjacent buffers into a single request.
1507 firstComparable
= NULL
;
1508 lbp
= &afs_asyncbuf
;
1509 for (qbp
= *lbp
; qbp
; lbp
= &qbp
->av_forw
, qbp
= *lbp
) {
1510 if (EFS_COMPARABLE(tbp
, qbp
)) {
1511 if (!firstComparable
)
1512 firstComparable
= qbp
;
1513 /* this buffer is comparable, so see if the next buffer
1514 * is farther in the file; if it is insert before next buffer.
1516 if (tbp
->b_blkno
< qbp
->b_blkno
) {
1520 /* If we're at the end of a block of comparable buffers, we
1521 * insert the buffer here to keep all comparable buffers
1524 if (firstComparable
)
1528 /* do the insert before qbp now */
1529 tbp
->av_forw
= *lbp
;
1531 if (firstComparable
== NULL
) {
1532 /* next we're going to do all sorts of buffer merging tricks, but
1533 * here we know we're the only COMPARABLE block in the
1534 * afs_asyncbuf list, so we just skip that and continue with
1535 * the next input buffer.
1540 /* we may have actually added the "new" firstComparable */
1541 if (tbp
->av_forw
== firstComparable
)
1542 firstComparable
= tbp
;
1544 * when we get here, firstComparable points to the first dude in the
1545 * same vnode and subspace that we (tbp) are in. We go through the
1546 * area of this list with COMPARABLE buffers (a contiguous region) and
1547 * repeated merge buffers that are contiguous and in the same block or
1548 * buffers that are contiguous and are both integral numbers of blocks.
1549 * Note that our end goal is to have as big blocks as we can, but we
1550 * must minimize the transfers that are not integral #s of blocks on
1551 * block boundaries, since Episode will do those smaller and/or
1552 * unaligned I/Os synchronously.
1554 * A useful example to consider has the async queue with this in it:
1555 * [8K block, 2 pages] [4K block, 1 page] [4K hole] [8K block, 2 pages]
1556 * If we get a request that fills the 4K hole, we want to merge this
1557 * whole mess into a 24K, 6 page transfer. If we don't, however, we
1558 * don't want to do any merging since adding the 4K transfer to the 8K
1559 * transfer makes the 8K transfer synchronous.
1561 * Note that if there are any blocks whose size is a multiple of
1562 * the file system block size, then we know that such blocks are also
1563 * on block boundaries.
1566 doMerge
= 1; /* start the loop */
1567 while (doMerge
) { /* loop until an iteration doesn't
1568 * make any more changes */
1570 for (qbp
= firstComparable
;; qbp
= qnbp
) {
1571 qnbp
= qbp
->av_forw
;
1573 break; /* we're done */
1574 if (!EFS_COMPARABLE(qbp
, qnbp
))
1577 /* try to merge qbp and qnbp */
1579 /* first check if both not adjacent go on to next region */
1580 if ((dbtob(qbp
->b_blkno
) + qbp
->b_bcount
) !=
1581 dbtob(qnbp
->b_blkno
))
1584 /* note if both in the same block, the first byte of leftmost guy
1585 * and last byte of rightmost guy are in the same block.
1587 if ((dbtob(qbp
->b_blkno
) & ~(AIX_VM_BLKSIZE
- 1)) ==
1588 ((dbtob(qnbp
->b_blkno
) + qnbp
->b_bcount
-
1589 1) & ~(AIX_VM_BLKSIZE
- 1))) {
1590 doMerge
= 1; /* both in same block */
1591 } else if ((qbp
->b_bcount
& (AIX_VM_BLKSIZE
- 1)) == 0
1592 && (qnbp
->b_bcount
& (AIX_VM_BLKSIZE
- 1)) == 0) {
1593 doMerge
= 1; /* both integral #s of blocks */
1598 /* merge both of these blocks together */
1599 /* first set age to the older of the two */
1600 if ((int32long64_t
) qnbp
->av_back
-
1601 (int32long64_t
) qbp
->av_back
< 0) {
1602 qbp
->av_back
= qnbp
->av_back
;
1604 lwbp
= (struct buf
**) &qbp
->b_work
;
1605 /* find end of qbp's work queue */
1606 for (xbp
= *lwbp
; xbp
;
1607 lwbp
= (struct buf
**) &xbp
->b_work
, xbp
= *lwbp
);
1609 * now setting *lwbp will change the last ptr in the qbp's
1612 qbp
->av_forw
= qnbp
->av_forw
; /* splice out qnbp */
1613 qbp
->b_bcount
+= qnbp
->b_bcount
; /* fix count */
1614 *lwbp
= qnbp
; /* append qnbp to end */
1616 * note that qnbp is bogus, but it doesn't matter because
1617 * we're going to restart the for loop now.
1619 break; /* out of the for loop */
1623 } /* for loop for all interrupt data */
1624 /* at this point, all I/O has been queued. Wakeup the daemon */
1625 e_wakeup_one((int *)&afs_asyncbuf_cv
);
1626 unlock_enable(oldPriority
, &afs_asyncbuf_lock
);
/*
 * Vnode-inactive handler: hand the no-longer-referenced vcache back to the
 * cache manager via afs_InactiveVCache().
 *
 * NOTE(review): reconstructed from a mangled extract that showed only the
 * first parameter and the afs_InactiveVCache call; the credential-parameter
 * type and void return are assumed -- confirm against upstream.
 */
void
afs_inactive(struct vcache *avc,
	     struct AFS_UCRED *acred)
{
    afs_InactiveVCache(avc, acred);
}
1639 afs_gn_revoke(struct vnode
*vp
,
1642 struct vattr
*vinfop
,
1645 AFS_STATCNT(afs_gn_revoke
);
1646 /* NO SUPPORT for this in afs YET! */
1647 return (EOPNOTSUPP
);
1651 afs_gn_getacl(struct vnode
*vp
,
1660 afs_gn_setacl(struct vnode
*vp
,
1669 afs_gn_getpcl(struct vnode
*vp
,
1678 afs_gn_setpcl(struct vnode
*vp
,
1687 afs_gn_seek(struct vnode
* vp
, offset_t
* offp
, struct ucred
* crp
)
1690 * File systems which do not wish to do offset validation can simply
1691 * return 0. File systems which do not provide the vn_seek entry point
1692 * will have a maximum offset of OFF_MAX (2 gigabytes minus 1) enforced
1693 * by the logical file system.
1706 * declare a struct vnodeops and initialize it with ptrs to all functions
/*
 * The (unlocked) vnode-operations vector handed to AIX for AFS vnodes.
 * Unimplemented slots point at afs_gn_enosys through slot-specific casts;
 * spare/pager slots differ by AIX level (AFS_AIX53_ENV / AFS_AIX51_ENV).
 * NOTE(review): most concrete entries (orig. lines 1710-1755 and others) are
 * missing from this extract and the surviving text is line-mangled; do not
 * edit this initializer without the full source -- entry ORDER is the ABI.
 */
1708 struct vnodeops afs_gn_vnodeops
= {
1709 /* creation/naming/deletion */
1716 /* lookup, file handle stuff */
1718 (int(*)(struct vnode
*,struct fileid
*,struct ucred
*))
1720 /* access to files */
1721 (int(*)(struct vnode
*, int32long64_t
, ext_t
, caddr_t
*,struct ucred
*))
1723 (int(*)(struct vnode
*, struct vnode
**, int32long64_t
,caddr_t
, int32long64_t
, caddr_t
*, struct ucred
*))
1730 /* manipulate attributes of files */
1734 /* data update operations */
1747 (int(*)(struct vnode
*,struct buf
*,struct ucred
*))
1749 /* security things */
1756 (int(*)(struct vnode
*, int32long64_t
, int32long64_t
, offset_t
, offset_t
, struct ucred
*))
1757 afs_gn_enosys
, /* vn_fsync_range */
1758 (int(*)(struct vnode
*, struct vnode
**, int32long64_t
, char *, struct vattr
*, int32long64_t
, caddr_t
*, struct ucred
*))
1759 afs_gn_enosys
, /* vn_create_attr */
1760 (int(*)(struct vnode
*, int32long64_t
, void *, size_t, struct ucred
*))
1761 afs_gn_enosys
, /* vn_finfo */
1762 (int(*)(struct vnode
*, caddr_t
, offset_t
, offset_t
, uint32long64_t
, uint32long64_t
, struct ucred
*))
1763 afs_gn_enosys
, /* vn_map_lloff */
1764 (int(*)(struct vnode
*,struct uio
*,int*,struct ucred
*))
1765 afs_gn_enosys
, /* vn_readdir_eofp */
1766 (int(*)(struct vnode
*, enum uio_rw
, int32long64_t
, struct uio
*, ext_t
, caddr_t
, struct vattr
*, struct vattr
*, struct ucred
*))
1767 afs_gn_enosys
, /* vn_rdwr_attr */
1768 (int(*)(struct vnode
*,int,void*,struct ucred
*))
1769 afs_gn_enosys
, /* vn_memcntl */
1770 #ifdef AFS_AIX53_ENV /* Present in AIX 5.3 and up */
1771 (int(*)(struct vnode
*,const char*,struct uio
*,struct ucred
*))
1772 afs_gn_enosys
, /* vn_getea */
1773 (int(*)(struct vnode
*,const char*,struct uio
*,int,struct ucred
*))
1774 afs_gn_enosys
, /* vn_setea */
1775 (int(*)(struct vnode
*, struct uio
*, struct ucred
*))
1776 afs_gn_enosys
, /* vn_listea */
1777 (int(*)(struct vnode
*, const char *, struct ucred
*))
1778 afs_gn_enosys
, /* vn_removeea */
1779 (int(*)(struct vnode
*, const char *, struct vattr
*, struct ucred
*))
1780 afs_gn_enosys
, /* vn_statea */
1781 (int(*)(struct vnode
*, uint64_t, acl_type_t
*, struct uio
*, size_t *, mode_t
*, struct ucred
*))
1782 afs_gn_enosys
, /* vn_getxacl */
1783 (int(*)(struct vnode
*, uint64_t, acl_type_t
, struct uio
*, mode_t
, struct ucred
*))
1784 afs_gn_enosys
, /* vn_setxacl */
1785 #else /* AFS_AIX53_ENV */
1786 afs_gn_enosys
, /* vn_spare7 */
1787 afs_gn_enosys
, /* vn_spare8 */
1788 afs_gn_enosys
, /* vn_spare9 */
1789 afs_gn_enosys
, /* vn_spareA */
1790 afs_gn_enosys
, /* vn_spareB */
1791 afs_gn_enosys
, /* vn_spareC */
1792 afs_gn_enosys
, /* vn_spareD */
1793 #endif /* AFS_AIX53_ENV */
1794 afs_gn_enosys
, /* vn_spareE */
1795 afs_gn_enosys
/* vn_spareF */
1796 #ifdef AFS_AIX51_ENV
1797 ,(int(*)(struct gnode
*,long long,char*,unsigned long*, unsigned long*,unsigned int*))
1798 afs_gn_enosys
, /* pagerBackRange */
1799 (int64_t(*)(struct gnode
*))
1800 afs_gn_enosys
, /* pagerGetFileSize */
1801 (void(*)(struct gnode
*, vpn_t
, vpn_t
*, vpn_t
*, vpn_t
*, boolean_t
))
1802 afs_gn_enosys
, /* pagerReadAhead */
1803 (void(*)(struct gnode
*, int64_t, int64_t, uint
))
1804 afs_gn_enosys
, /* pagerReadWriteBehind */
1805 (void(*)(struct gnode
*,long long,unsigned long,unsigned long,unsigned int))
1806 afs_gn_enosys
/* pagerEndCopy */
1809 struct vnodeops
*afs_ops
= &afs_gn_vnodeops
;
1813 extern struct vfsops Afs_vfsops
;
1814 extern int Afs_init();
1816 #define AFS_CALLOUT_TBL_SIZE 256
1819 * the following additional layer of gorp is due to the fact that the
1820 * filesystem layer no longer obtains the kernel lock for me. I was relying
1821 * on this behavior to avoid having to think about locking.
1825 vfs_mount(struct vfs
*a
, struct ucred
*b
)
1827 int glockOwner
, ret
;
1829 glockOwner
= ISAFS_GLOCK();
1832 ret
= (*Afs_vfsops
.vfs_mount
) (a
, b
);
1840 vfs_unmount(struct vfs
*a
, int b
, struct ucred
*c
)
1842 int glockOwner
, ret
;
1844 glockOwner
= ISAFS_GLOCK();
1847 ret
= (*Afs_vfsops
.vfs_unmount
) (a
, b
, c
);
1855 vfs_root(struct vfs
*a
, struct vnode
**b
, struct ucred
*c
)
1857 int glockOwner
, ret
;
1859 glockOwner
= ISAFS_GLOCK();
1862 ret
= (*Afs_vfsops
.vfs_root
) (a
, b
, c
);
1870 vfs_statfs(struct vfs
*a
, struct statfs
*b
, struct ucred
*c
)
1872 int glockOwner
, ret
;
1874 glockOwner
= ISAFS_GLOCK();
1877 ret
= (*Afs_vfsops
.vfs_statfs
) (a
, b
, c
);
1885 vfs_sync(struct gfs
*a
)
1887 int glockOwner
, ret
;
1889 glockOwner
= ISAFS_GLOCK();
1892 ret
= (*Afs_vfsops
.vfs_sync
) (a
);
1899 vfs_vget(struct vfs
*a
, struct vnode
**b
, struct fileid
*c
, struct ucred
*d
)
1901 int glockOwner
, ret
;
1903 glockOwner
= ISAFS_GLOCK();
1906 ret
= (*Afs_vfsops
.vfs_vget
) (a
, b
, c
, d
);
1914 vfs_cntl(struct vfs
*a
, int b
, caddr_t c
, size_t d
, struct ucred
*e
)
1916 int glockOwner
, ret
;
1918 glockOwner
= ISAFS_GLOCK();
1921 ret
= (*Afs_vfsops
.vfs_cntl
) (a
, b
, c
, d
, e
);
1929 vfs_quotactl(struct vfs
*a
, int b
, uid_t c
, caddr_t d
, struct ucred
*e
)
1931 int glockOwner
, ret
;
1933 glockOwner
= ISAFS_GLOCK();
1936 ret
= (*Afs_vfsops
.vfs_quotactl
) (a
, b
, c
, d
, e
);
1943 #ifdef AFS_AIX51_ENV
1945 vfs_syncvfs(struct gfs
*a
, struct vfs
*b
, int c
, struct ucred
*d
)
1947 int glockOwner
, ret
;
1949 glockOwner
= ISAFS_GLOCK();
1952 ret
= (*Afs_vfsops
.vfs_syncvfs
) (a
, b
, c
, d
);
/*
 * Locked variant of Afs_vfsops; its entries are the glock-taking vfs_*
 * wrappers defined above.  NOTE(review): the initializer entries
 * (orig. lines 1962-1969 and the closing brace) are missing from this
 * extract -- do not edit without the full source.
 */
1961 struct vfsops locked_Afs_vfsops
= {
1970 #ifdef AFS_AIX51_ENV
1976 vn_link(struct vnode
*a
, struct vnode
*b
, char *c
, struct ucred
*d
)
1978 int glockOwner
, ret
;
1980 glockOwner
= ISAFS_GLOCK();
1983 ret
= (*afs_gn_vnodeops
.vn_link
) (a
, b
, c
, d
);
1991 vn_mkdir(struct vnode
*a
, char *b
, int32long64_t c
, struct ucred
*d
)
1993 int glockOwner
, ret
;
1995 glockOwner
= ISAFS_GLOCK();
1998 ret
= (*afs_gn_vnodeops
.vn_mkdir
) (a
, b
, c
, d
);
2006 vn_mknod(struct vnode
*a
, caddr_t b
, int32long64_t c
, dev_t d
,
2009 int glockOwner
, ret
;
2011 glockOwner
= ISAFS_GLOCK();
2014 ret
= (*afs_gn_vnodeops
.vn_mknod
) (a
, b
, c
, d
, e
);
2022 vn_remove(struct vnode
*a
, struct vnode
*b
, char *c
, struct ucred
*d
)
2024 int glockOwner
, ret
;
2026 glockOwner
= ISAFS_GLOCK();
2029 ret
= (*afs_gn_vnodeops
.vn_remove
) (a
, b
, c
, d
);
2037 vn_rename(struct vnode
*a
, struct vnode
*b
, caddr_t c
, struct vnode
*d
,
2038 struct vnode
*e
, caddr_t f
, struct ucred
*g
)
2040 int glockOwner
, ret
;
2042 glockOwner
= ISAFS_GLOCK();
2045 ret
= (*afs_gn_vnodeops
.vn_rename
) (a
, b
, c
, d
, e
, f
, g
);
2053 vn_rmdir(struct vnode
*a
, struct vnode
*b
, char *c
, struct ucred
*d
)
2055 int glockOwner
, ret
;
2057 glockOwner
= ISAFS_GLOCK();
2060 ret
= (*afs_gn_vnodeops
.vn_rmdir
) (a
, b
, c
, d
);
2068 vn_lookup(struct vnode
*a
, struct vnode
**b
, char *c
, int32long64_t d
,
2069 struct vattr
*v
, struct ucred
*e
)
2071 int glockOwner
, ret
;
2073 glockOwner
= ISAFS_GLOCK();
2076 ret
= (*afs_gn_vnodeops
.vn_lookup
) (a
, b
, c
, d
, v
, e
);
2084 vn_fid(struct vnode
*a
, struct fileid
*b
, struct ucred
*c
)
2086 int glockOwner
, ret
;
2088 glockOwner
= ISAFS_GLOCK();
2091 ret
= (*afs_gn_vnodeops
.vn_fid
) (a
, b
, c
);
/*
 * Glock-serializing wrapper around afs_gn_vnodeops.vn_open.
 * NOTE(review): parameters b-e (orig. lines 2100-2104) and the
 * AFS_GLOCK/AFS_GUNLOCK/return lines are missing from this extract;
 * the mangled text below is reproduced verbatim -- consult the full
 * source before editing.
 */
2099 vn_open(struct vnode
*a
,
2105 int glockOwner
, ret
;
2107 glockOwner
= ISAFS_GLOCK();
2110 ret
= (*afs_gn_vnodeops
.vn_open
) (a
, b
, c
, d
, e
);
2118 vn_create(struct vnode
*a
, struct vnode
**b
, int32long64_t c
, caddr_t d
,
2119 int32long64_t e
, caddr_t
* f
, struct ucred
*g
)
2121 int glockOwner
, ret
;
2123 glockOwner
= ISAFS_GLOCK();
2126 ret
= (*afs_gn_vnodeops
.vn_create
) (a
, b
, c
, d
, e
, f
, g
);
2134 vn_hold(struct vnode
*a
)
2136 int glockOwner
, ret
;
2138 glockOwner
= ISAFS_GLOCK();
2141 ret
= (*afs_gn_vnodeops
.vn_hold
) (a
);
2149 vn_rele(struct vnode
*a
)
2151 int glockOwner
, ret
;
2153 glockOwner
= ISAFS_GLOCK();
2156 ret
= (*afs_gn_vnodeops
.vn_rele
) (a
);
2164 vn_close(struct vnode
*a
, int32long64_t b
, caddr_t c
, struct ucred
*d
)
2166 int glockOwner
, ret
;
2168 glockOwner
= ISAFS_GLOCK();
2171 ret
= (*afs_gn_vnodeops
.vn_close
) (a
, b
, c
, d
);
2179 vn_map(struct vnode
*a
, caddr_t b
, uint32long64_t c
, uint32long64_t d
,
2180 uint32long64_t e
, struct ucred
*f
)
2182 int glockOwner
, ret
;
2184 glockOwner
= ISAFS_GLOCK();
2187 ret
= (*afs_gn_vnodeops
.vn_map
) (a
, b
, c
, d
, e
, f
);
2195 vn_unmap(struct vnode
*a
, int32long64_t b
, struct ucred
*c
)
2197 int glockOwner
, ret
;
2199 glockOwner
= ISAFS_GLOCK();
2202 ret
= (*afs_gn_vnodeops
.vn_unmap
) (a
, b
, c
);
2210 vn_access(struct vnode
*a
, int32long64_t b
, int32long64_t c
, struct ucred
*d
)
2212 int glockOwner
, ret
;
2214 glockOwner
= ISAFS_GLOCK();
2217 ret
= (*afs_gn_vnodeops
.vn_access
) (a
, b
, c
, d
);
2225 vn_getattr(struct vnode
*a
, struct vattr
*b
, struct ucred
*c
)
2227 int glockOwner
, ret
;
2229 glockOwner
= ISAFS_GLOCK();
2232 ret
= (*afs_gn_vnodeops
.vn_getattr
) (a
, b
, c
);
2240 vn_setattr(struct vnode
*a
, int32long64_t b
, int32long64_t c
, int32long64_t d
,
2241 int32long64_t e
, struct ucred
*f
)
2243 int glockOwner
, ret
;
2245 glockOwner
= ISAFS_GLOCK();
2248 ret
= (*afs_gn_vnodeops
.vn_setattr
) (a
, b
, c
, d
, e
, f
);
2256 vn_fclear(struct vnode
*a
, int32long64_t b
, offset_t c
, offset_t d
2257 , caddr_t e
, struct ucred
*f
)
2259 int glockOwner
, ret
;
2261 glockOwner
= ISAFS_GLOCK();
2264 ret
= (*afs_gn_vnodeops
.vn_fclear
) (a
, b
, c
, d
, e
, f
);
2272 vn_fsync(struct vnode
*a
, int32long64_t b
, int32long64_t c
, struct ucred
*d
)
2274 int glockOwner
, ret
;
2276 glockOwner
= ISAFS_GLOCK();
2279 ret
= (*afs_gn_vnodeops
.vn_fsync
) (a
, b
, c
, d
);
2287 vn_ftrunc(struct vnode
*a
, int32long64_t b
, offset_t c
, caddr_t d
,
2290 int glockOwner
, ret
;
2292 glockOwner
= ISAFS_GLOCK();
2295 ret
= (*afs_gn_vnodeops
.vn_ftrunc
) (a
, b
, c
, d
, e
);
2303 vn_rdwr(struct vnode
*a
, enum uio_rw b
, int32long64_t c
, struct uio
*d
,
2304 ext_t e
, caddr_t f
, struct vattr
*v
, struct ucred
*g
)
2306 int glockOwner
, ret
;
2308 glockOwner
= ISAFS_GLOCK();
2311 ret
= (*afs_gn_vnodeops
.vn_rdwr
) (a
, b
, c
, d
, e
, f
, v
, g
);
/*
 * Glock-serializing wrapper around afs_gn_vnodeops.vn_lockctl.
 * NOTE(review): the parameter list (orig. lines 2320-2330, part of it
 * conditional on AFS_AIX52_ENV) is missing from this extract; the mangled
 * text below is reproduced verbatim -- consult the full source before
 * editing.
 */
2319 vn_lockctl(struct vnode
*a
,
2324 #ifdef AFS_AIX52_ENV /* Changed in AIX 5.2 and up */
2326 #else /* AFS_AIX52_ENV */
2328 #endif /* AFS_AIX52_ENV */
2331 int glockOwner
, ret
;
2333 glockOwner
= ISAFS_GLOCK();
2336 ret
= (*afs_gn_vnodeops
.vn_lockctl
) (a
, b
, c
, d
, e
, f
, g
);
2344 vn_ioctl(struct vnode
*a
, int32long64_t b
, caddr_t c
, size_t d
, ext_t e
,
2347 int glockOwner
, ret
;
2349 glockOwner
= ISAFS_GLOCK();
2352 ret
= (*afs_gn_vnodeops
.vn_ioctl
) (a
, b
, c
, d
, e
, f
);
2360 vn_readlink(struct vnode
*a
, struct uio
*b
, struct ucred
*c
)
2362 int glockOwner
, ret
;
2364 glockOwner
= ISAFS_GLOCK();
2367 ret
= (*afs_gn_vnodeops
.vn_readlink
) (a
, b
, c
);
2375 vn_select(struct vnode
*a
, int32long64_t b
, ushort c
, ushort
* d
,
2376 void (*e
) (), caddr_t f
, struct ucred
*g
)
2378 int glockOwner
, ret
;
2380 glockOwner
= ISAFS_GLOCK();
2383 ret
= (*afs_gn_vnodeops
.vn_select
) (a
, b
, c
, d
, e
, f
, g
);
2391 vn_symlink(struct vnode
*a
, char *b
, char *c
, struct ucred
*d
)
2393 int glockOwner
, ret
;
2395 glockOwner
= ISAFS_GLOCK();
2398 ret
= (*afs_gn_vnodeops
.vn_symlink
) (a
, b
, c
, d
);
2406 vn_readdir(struct vnode
*a
, struct uio
*b
, struct ucred
*c
)
2408 int glockOwner
, ret
;
2410 glockOwner
= ISAFS_GLOCK();
2413 ret
= (*afs_gn_vnodeops
.vn_readdir
) (a
, b
, c
);
2421 vn_revoke(struct vnode
*a
, int32long64_t b
, int32long64_t c
, struct vattr
*d
,
2424 int glockOwner
, ret
;
2426 glockOwner
= ISAFS_GLOCK();
2429 ret
= (*afs_gn_vnodeops
.vn_revoke
) (a
, b
, c
, d
, e
);
2437 vn_getacl(struct vnode
*a
, struct uio
*b
, struct ucred
*c
)
2439 int glockOwner
, ret
;
2441 glockOwner
= ISAFS_GLOCK();
2444 ret
= (*afs_gn_vnodeops
.vn_getacl
) (a
, b
, c
);
2452 vn_setacl(struct vnode
*a
, struct uio
*b
, struct ucred
*c
)
2454 int glockOwner
, ret
;
2456 glockOwner
= ISAFS_GLOCK();
2459 ret
= (*afs_gn_vnodeops
.vn_setacl
) (a
, b
, c
);
2467 vn_getpcl(struct vnode
*a
, struct uio
*b
, struct ucred
*c
)
2469 int glockOwner
, ret
;
2471 glockOwner
= ISAFS_GLOCK();
2474 ret
= (*afs_gn_vnodeops
.vn_getpcl
) (a
, b
, c
);
2482 vn_setpcl(struct vnode
*a
, struct uio
*b
, struct ucred
*c
)
2484 int glockOwner
, ret
;
2486 glockOwner
= ISAFS_GLOCK();
2489 ret
= (*afs_gn_vnodeops
.vn_setpcl
) (a
, b
, c
);
/*
 * Locked vnode-operations vector: entries are the glock-taking vn_* wrappers
 * above, except vn_strategy, which stays as the raw afs_gn_strategy (see the
 * inline comment).  NOTE(review): most concrete entries (orig. lines
 * 2498-2533) are missing from this extract and the surviving text is
 * line-mangled; do not edit this initializer without the full source --
 * entry ORDER is the ABI.
 */
2497 struct vnodeops locked_afs_gn_vnodeops
= {
2526 (int(*)(struct vnode
*,struct buf
*,struct ucred
*))
2527 afs_gn_strategy
, /* no locking!!! (discovered the hard way) */
2534 (int(*)(struct vnode
*, int32long64_t
, int32long64_t
, offset_t
, offset_t
, struct ucred
*))
2535 afs_gn_enosys
, /* vn_fsync_range */
2536 (int(*)(struct vnode
*, struct vnode
**, int32long64_t
, char *, struct vattr
*, int32long64_t
, caddr_t
*, struct ucred
*))
2537 afs_gn_enosys
, /* vn_create_attr */
2538 (int(*)(struct vnode
*, int32long64_t
, void *, size_t, struct ucred
*))
2539 afs_gn_enosys
, /* vn_finfo */
2540 (int(*)(struct vnode
*, caddr_t
, offset_t
, offset_t
, uint32long64_t
, uint32long64_t
, struct ucred
*))
2541 afs_gn_enosys
, /* vn_map_lloff */
2542 (int(*)(struct vnode
*,struct uio
*,int*,struct ucred
*))
2543 afs_gn_enosys
, /* vn_readdir_eofp */
2544 (int(*)(struct vnode
*, enum uio_rw
, int32long64_t
, struct uio
*, ext_t
, caddr_t
, struct vattr
*, struct vattr
*, struct ucred
*))
2545 afs_gn_enosys
, /* vn_rdwr_attr */
2546 (int(*)(struct vnode
*,int,void*,struct ucred
*))
2547 afs_gn_enosys
, /* vn_memcntl */
2548 #ifdef AFS_AIX53_ENV /* Present in AIX 5.3 and up */
2549 (int(*)(struct vnode
*,const char*,struct uio
*,struct ucred
*))
2550 afs_gn_enosys
, /* vn_getea */
2551 (int(*)(struct vnode
*,const char*,struct uio
*,int,struct ucred
*))
2552 afs_gn_enosys
, /* vn_setea */
2553 (int(*)(struct vnode
*, struct uio
*, struct ucred
*))
2554 afs_gn_enosys
, /* vn_listea */
2555 (int(*)(struct vnode
*, const char *, struct ucred
*))
2556 afs_gn_enosys
, /* vn_removeea */
2557 (int(*)(struct vnode
*, const char *, struct vattr
*, struct ucred
*))
2558 afs_gn_enosys
, /* vn_statea */
2559 (int(*)(struct vnode
*, uint64_t, acl_type_t
*, struct uio
*, size_t *, mode_t
*, struct ucred
*))
2560 afs_gn_enosys
, /* vn_getxacl */
2561 (int(*)(struct vnode
*, uint64_t, acl_type_t
, struct uio
*, mode_t
, struct ucred
*))
2562 afs_gn_enosys
, /* vn_setxacl */
2563 #else /* AFS_AIX53_ENV */
2564 afs_gn_enosys
, /* vn_spare7 */
2565 afs_gn_enosys
, /* vn_spare8 */
2566 afs_gn_enosys
, /* vn_spare9 */
2567 afs_gn_enosys
, /* vn_spareA */
2568 afs_gn_enosys
, /* vn_spareB */
2569 afs_gn_enosys
, /* vn_spareC */
2570 afs_gn_enosys
, /* vn_spareD */
2571 #endif /* AFS_AIX53_ENV */
2572 afs_gn_enosys
, /* vn_spareE */
2573 afs_gn_enosys
/* vn_spareF */
2574 #ifdef AFS_AIX51_ENV
2575 ,(int(*)(struct gnode
*,long long,char*,unsigned long*, unsigned long*,unsigned int*))
2576 afs_gn_enosys
, /* pagerBackRange */
2577 (int64_t(*)(struct gnode
*))
2578 afs_gn_enosys
, /* pagerGetFileSize */
2579 (void(*)(struct gnode
*, vpn_t
, vpn_t
*, vpn_t
*, vpn_t
*, boolean_t
))
2580 afs_gn_enosys
, /* pagerReadAhead */
2581 (void(*)(struct gnode
*, int64_t, int64_t, uint
))
2582 afs_gn_enosys
, /* pagerReadWriteBehind */
2583 (void(*)(struct gnode
*,long long,unsigned long,unsigned long,unsigned int))
2584 afs_gn_enosys
/* pagerEndCopy */
2588 struct gfs afs_gfs
= {
2590 &locked_afs_gn_vnodeops
,
2594 GFS_VERSION4
| GFS_VERSION42
| GFS_REMOTE
,