2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "afs/param.h"
15 * SOLARIS/osi_vnodeops.c
19 * Functions: AFS_TRYUP, _init, _info, _fini, afs_addmap, afs_delmap,
20 * afs_vmread, afs_vmwrite, afs_getpage, afs_GetOnePage, afs_putpage,
21 * afs_putapage, afs_nfsrdwr, afs_map, afs_PageLeft, afs_pathconf/afs_cntl,
22 * afs_ioctl, afs_rwlock, afs_rwunlock, afs_seek, afs_space, afs_dump,
23 * afs_cmp, afs_realvp, afs_pageio, afs_dumpctl, afs_dispose, afs_setsecattr,
24 * afs_getsecattr, gafs_open, gafs_close, gafs_getattr, gafs_setattr,
25 * gafs_access, gafs_lookup, gafs_create, gafs_remove, gafs_link,
26 * gafs_rename, gafs_mkdir, gafs_rmdir, gafs_readdir, gafs_symlink,
27 * gafs_readlink, gafs_fsync, afs_inactive, gafs_inactive, gafs_fid
30 * Variables: Afs_vnodeops
33 #include "afs/sysincludes.h" /* Standard vendor system headers */
34 #include "afsincludes.h" /* Afs-based standard headers */
35 #include "afs/afs_stats.h" /* statistics */
36 #include "afs/nfsclient.h"
45 #include <vm/seg_map.h>
46 #include <vm/seg_vn.h>
48 #if defined(AFS_SUN511_ENV)
49 #include <sys/vfs_opreg.h>
51 #include <sys/modctl.h>
52 #include <sys/syscall.h>
53 #include <sys/debug.h>
54 #include <sys/fs_subr.h>
56 /* Translate a faultcode_t as returned by some of the vm routines
57 * into a suitable errno value.
60 afs_fc2errno(faultcode_t fc
)
62 switch (FC_CODE(fc
)) {
75 extern struct as kas
; /* kernel addr space */
76 extern unsigned char *afs_indexFlags
;
77 extern afs_lock_t afs_xdcache
;
79 static int afs_nfsrdwr(struct vcache
*avc
, struct uio
*auio
, enum uio_rw arw
,
80 int ioflag
, afs_ucred_t
*acred
);
81 static int afs_GetOnePage(struct vnode
*vp
, u_offset_t off
, u_int alen
,
82 u_int
*protp
, struct page
*pl
[], u_int plsz
,
83 struct seg
*seg
, caddr_t addr
, enum seg_rw rw
,
89 afs_addmap(struct vnode
*avp
, offset_t offset
, struct as
*asp
,
90 caddr_t addr
, int length
, int prot
, int maxprot
, int flags
,
93 /* XXX What should we do here?? XXX */
98 afs_delmap(struct vnode
*avp
, offset_t offset
, struct as
*asp
,
99 caddr_t addr
, int length
, int prot
, int maxprot
, int flags
,
102 /* XXX What should we do here?? XXX */
107 #ifdef AFS_SUN510_ENV
108 afs_vmread(struct vnode
*avp
, struct uio
*auio
, int ioflag
,
109 afs_ucred_t
*acred
, caller_context_t
*ct
)
111 afs_vmread(struct vnode
*avp
, struct uio
*auio
, int ioflag
,
117 if (!RW_READ_HELD(&(VTOAFS(avp
))->rwlock
))
118 osi_Panic("afs_vmread: !rwlock");
120 code
= afs_nfsrdwr(VTOAFS(avp
), auio
, UIO_READ
, ioflag
, acred
);
127 #ifdef AFS_SUN510_ENV
128 afs_vmwrite(struct vnode
*avp
, struct uio
*auio
, int ioflag
,
129 afs_ucred_t
*acred
, caller_context_t
*ct
)
131 afs_vmwrite(struct vnode
*avp
, struct uio
*auio
, int ioflag
,
137 if (!RW_WRITE_HELD(&(VTOAFS(avp
))->rwlock
))
138 osi_Panic("afs_vmwrite: !rwlock");
140 code
= afs_nfsrdwr(VTOAFS(avp
), auio
, UIO_WRITE
, ioflag
, acred
);
146 afs_getpage(struct vnode
*vp
, offset_t off
, u_int len
, u_int
*protp
,
147 struct page
*pl
[], u_int plsz
, struct seg
*seg
, caddr_t addr
,
148 enum seg_rw rw
, afs_ucred_t
*acred
)
151 AFS_STATCNT(afs_getpage
);
153 if (vp
->v_flag
& VNOMAP
) /* File doesn't allow mapping */
160 afs_GetOnePage(vp
, off
, len
, protp
, pl
, plsz
, seg
, addr
, rw
, acred
);
162 struct multiPage_range range
;
163 struct vcache
*vcp
= VTOAFS(vp
);
165 /* We've been asked to get more than one page. We must return all
166 * requested pages at once, all of them locked, which means all of
167 * these dcache entries cannot be kicked out of the cache before we
168 * return (since their pages cannot be invalidated).
170 * afs_GetOnePage will be called multiple times by pvn_getpages in
171 * order to get all of the requested pages. One of the later
172 * afs_GetOnePage calls may need to evict some cache entries in order
173 * to perform its read. If we try to kick out one of the entries an
174 * earlier afs_GetOnePage call used, we will deadlock since we have
175 * the page locked. So, to tell afs_GetDownD that it should skip over
176 * any entries we've read in due to this afs_getpage call, record the
177 * offset and length in avc->multiPage.
179 * Ideally we would just set something in each dcache as we get it,
180 * but that is rather difficult, since pvn_getpages doesn't let us
181 * retain any information between calls to afs_GetOnePage. So instead
182 * just record the offset and length, and let afs_GetDownD calculate
183 * which dcache entries should be skipped. */
188 ObtainWriteLock(&vcp
->vlock
, 548);
189 QAdd(&vcp
->multiPage
, &range
.q
);
190 ReleaseWriteLock(&vcp
->vlock
);
192 pvn_getpages(afs_GetOnePage
, vp
, off
, len
, protp
, pl
, plsz
, seg
, addr
, rw
, acred
);
193 ObtainWriteLock(&vcp
->vlock
, 549);
195 ReleaseWriteLock(&vcp
->vlock
);
201 /* Return all the pages from [off..off+len) in file */
203 afs_GetOnePage(struct vnode
*vp
, u_offset_t off
, u_int alen
, u_int
*protp
,
204 struct page
*pl
[], u_int plsz
, struct seg
*seg
, caddr_t addr
,
205 enum seg_rw rw
, afs_ucred_t
*acred
)
216 afs_size_t offset
, nlen
= 0;
217 struct vrequest treq
;
218 afs_int32 mapForRead
= 0, Code
= 0;
222 osi_Panic("GetOnePage: !acred");
224 avc
= VTOAFS(vp
); /* cast to afs vnode */
226 if (avc
->credp
/*&& AFS_NFSXLATORREQ(acred) */
227 && AFS_NFSXLATORREQ(avc
->credp
)) {
230 if (code
= afs_InitReq(&treq
, acred
))
234 /* This is a read-ahead request, e.g. due to madvise. */
236 ObtainReadLock(&avc
->lock
);
238 while (plen
> 0 && !afs_BBusy()) {
239 /* Obtain a dcache entry at off. 2 means don't fetch data. */
241 afs_GetDCache(avc
, (afs_offs_t
) off
, &treq
, &offset
, &nlen
,
246 /* Write-lock the dcache entry, if we don't succeed, just go on */
247 if (0 != NBObtainWriteLock(&tdc
->lock
, 642)) {
252 /* If we aren't already fetching this dcache entry, queue it */
253 if (!(tdc
->mflags
& DFFetchReq
)) {
256 tdc
->mflags
|= DFFetchReq
;
257 bp
= afs_BQueue(BOP_FETCH
, avc
, B_DONTWAIT
, 0, acred
,
258 (afs_size_t
) off
, (afs_size_t
) 1, tdc
,
259 (void *)0, (void *)0);
261 /* Unable to start background fetch; might as well stop */
262 tdc
->mflags
&= ~DFFetchReq
;
263 ReleaseWriteLock(&tdc
->lock
);
267 ReleaseWriteLock(&tdc
->lock
);
269 ReleaseWriteLock(&tdc
->lock
);
274 /* Adjust our offset and remaining length values */
278 /* If we aren't making progress for some reason, bail out */
283 ReleaseReadLock(&avc
->lock
);
288 pl
[0] = NULL
; /* Make sure it's empty */
290 /* first, obtain the proper lock for the VM system */
292 /* if this is a read request, map the page in read-only. This will
293 * allow us to swap out the dcache entry if there are only read-only
294 * pages created for the chunk, which helps a *lot* when dealing
295 * with small caches. Otherwise, we have to invalidate the vm
296 * pages for the range covered by a chunk when we swap out the
299 if (rw
== S_READ
|| rw
== S_EXEC
)
306 if (rw
== S_WRITE
|| rw
== S_CREATE
)
307 tdc
= afs_GetDCache(avc
, (afs_offs_t
) off
, &treq
, &offset
, &nlen
, 5);
309 tdc
= afs_GetDCache(avc
, (afs_offs_t
) off
, &treq
, &offset
, &nlen
, 1);
311 return afs_CheckCode(EINVAL
, &treq
, 62);
312 code
= afs_VerifyVCache(avc
, &treq
);
315 return afs_CheckCode(code
, &treq
, 44); /* failed to get it */
318 ObtainReadLock(&avc
->lock
);
320 afs_Trace4(afs_iclSetp
, CM_TRACE_PAGEIN
, ICL_TYPE_POINTER
, (afs_int32
) vp
,
321 ICL_TYPE_OFFSET
, ICL_HANDLE_OFFSET(off
), ICL_TYPE_LONG
, len
,
322 ICL_TYPE_LONG
, (int)rw
);
327 /* Check to see if we're in the middle of a VM purge, and if we are, release
328 * the locks and try again when the VM purge is done. */
329 ObtainWriteLock(&avc
->vlock
, 550);
331 ReleaseReadLock(&avc
->lock
);
332 ReleaseWriteLock(&avc
->vlock
);
334 /* Check activeV again, it may have been turned off
335 * while we were waiting for a lock in afs_PutDCache */
336 ObtainWriteLock(&avc
->vlock
, 574);
338 avc
->vstates
|= VRevokeWait
;
339 ReleaseWriteLock(&avc
->vlock
);
340 afs_osi_Sleep(&avc
->vstates
);
342 ReleaseWriteLock(&avc
->vlock
);
346 ReleaseWriteLock(&avc
->vlock
);
348 /* We're about to do stuff with our dcache entry.. Lock it. */
349 ObtainReadLock(&tdc
->lock
);
351 /* Check to see whether the cache entry is still valid */
352 if (!(avc
->f
.states
& CStatd
)
353 || !hsame(avc
->f
.m
.DataVersion
, tdc
->f
.versionNo
)) {
354 ReleaseReadLock(&tdc
->lock
);
355 ReleaseReadLock(&avc
->lock
);
361 while (1) { /* loop over all pages */
362 /* now, try to find the page in memory (it may already be intransit or laying
363 * around the free list */
365 page_lookup(vp
, toffset
, (rw
== S_CREATE
? SE_EXCL
: SE_SHARED
));
369 /* if we make it here, we can't find the page in memory. Do a real disk read
370 * from the cache to get the data */
371 Code
|= 0x200; /* XXX */
372 /* use PG_EXCL because we know the page does not exist already. If it
373 * actually does exist, we have somehow raced between lookup and create.
374 * As of 4/98, that shouldn't be possible, but we'll be defensive here
375 * in case someone tries to relax all the serialization of read and write
376 * operations with harmless things like stat. */
378 page_create_va(vp
, toffset
, PAGESIZE
, PG_WAIT
| PG_EXCL
, seg
,
384 pagezero(page
, alen
, PAGESIZE
- alen
);
386 if (rw
== S_CREATE
) {
387 /* XXX Don't read from AFS in write only cases XXX */
388 page_io_unlock(page
);
391 /* now it is time to start I/O operation */
392 buf
= pageio_setup(page
, PAGESIZE
, vp
, B_READ
); /* allocate a buf structure */
395 buf
->b_lblkno
= lbtodb(toffset
);
396 bp_mapin(buf
); /* map it in to our address space */
399 /* afs_ustrategy will want to lock the dcache entry */
400 ReleaseReadLock(&tdc
->lock
);
401 code
= afs_ustrategy(buf
, acred
); /* do the I/O */
402 ObtainReadLock(&tdc
->lock
);
405 /* Before freeing unmap the buffer */
411 page_io_unlock(page
);
414 /* come here when we have another page (already held) to enter */
416 /* put page in array and continue */
417 /* The p_selock must be downgraded to a shared lock after the page is read */
418 if ((rw
!= S_CREATE
) && !(PAGE_SHARED(page
))) {
419 page_downgrade(page
);
422 code
= page_iolock_assert(page
);
428 break; /* done all the pages */
429 } /* while (1) ... */
433 ReleaseReadLock(&tdc
->lock
);
435 /* Prefetch next chunk if we're at a chunk boundary */
436 if (AFS_CHUNKOFFSET(off
) == 0) {
437 if (!(tdc
->mflags
& DFNextStarted
))
438 afs_PrefetchChunk(avc
, tdc
, acred
, &treq
);
441 ReleaseReadLock(&avc
->lock
);
442 ObtainWriteLock(&afs_xdcache
, 246);
444 /* track that we have dirty (or dirty-able) pages for this chunk. */
445 afs_indexFlags
[tdc
->index
] |= IFDirtyPages
;
447 afs_indexFlags
[tdc
->index
] |= IFAnyPages
;
448 ReleaseWriteLock(&afs_xdcache
);
450 afs_Trace3(afs_iclSetp
, CM_TRACE_PAGEINDONE
, ICL_TYPE_LONG
, code
,
451 ICL_TYPE_LONG
, (int)page
, ICL_TYPE_LONG
, Code
);
456 afs_Trace3(afs_iclSetp
, CM_TRACE_PAGEINDONE
, ICL_TYPE_LONG
, code
,
457 ICL_TYPE_LONG
, (int)page
, ICL_TYPE_LONG
, Code
);
458 /* release all pages, drop locks, return code */
460 pvn_read_done(page
, B_ERROR
);
461 ReleaseReadLock(&avc
->lock
);
462 ReleaseReadLock(&tdc
->lock
);
468 * Dummy pvn_vplist_dirty() handler for non-writable vnodes.
471 afs_never_putapage(struct vnode
*vp
, struct page
*pages
, u_offset_t
* offp
,
472 size_t * lenp
, int flags
, afs_ucred_t
*credp
)
474 struct vcache
*avc
= VTOAFS(vp
);
475 osi_Assert((avc
->f
.states
& CRO
) != 0);
476 osi_Panic("Dirty pages while flushing a read-only volume vnode.");
477 return EIO
; /* unreachable */
481 afs_putpage(struct vnode
*vp
, offset_t off
, u_int len
, int flags
,
489 afs_int32 NPages
= 0;
490 u_offset_t toff
= off
;
493 AFS_STATCNT(afs_putpage
);
494 if (vp
->v_flag
& VNOMAP
) /* file doesn't allow mapping */
498 * Putpage (ASYNC) is called every sec to flush out dirty vm pages
501 afs_Trace4(afs_iclSetp
, CM_TRACE_PAGEOUT
, ICL_TYPE_POINTER
,
502 (afs_int32
) vp
, ICL_TYPE_OFFSET
, ICL_HANDLE_OFFSET(off
),
503 ICL_TYPE_INT32
, (afs_int32
) len
, ICL_TYPE_LONG
, (int)flags
);
506 /* Get a list of modified (or whatever) pages */
508 ObtainSharedLock(&avc
->lock
, 247);
509 didLock
= SHARED_LOCK
;
510 endPos
= (afs_offs_t
) off
+ len
; /* position we're supposed to write up to */
511 while ((afs_offs_t
) toff
< endPos
512 && (afs_offs_t
) toff
< avc
->f
.m
.Length
) {
513 /* If not invalidating pages use page_lookup_nowait to avoid reclaiming
514 * them from the free list
517 if (flags
& (B_FREE
| B_INVAL
))
518 pages
= page_lookup(vp
, toff
, SE_EXCL
);
520 pages
= page_lookup_nowait(vp
, toff
, SE_SHARED
);
521 if (!pages
|| !pvn_getdirty(pages
, flags
))
524 if (didLock
== SHARED_LOCK
) {
526 didLock
= WRITE_LOCK
;
527 UpgradeSToWLock(&avc
->lock
, 671);
531 code
= afs_putapage(vp
, pages
, &toff
, &tlen
, flags
, cred
);
542 * We normally arrive here due to a vm flush.
544 * If this vnode belongs to a writable volume, obtain a vcache lock
545 * then call pvn_vplist_dirty to free, invalidate, or to write out
546 * dirty pages with afs_putapage. The afs_putapage routine requires a
547 * vcache lock, so we obtain it here before any page locks are taken.
548 * This locking order is done to avoid deadlocking due to races with
549 * afs_getpage, which also takes vcache and page locks.
551 * If this vnode belongs to a non-writable volume, then it will not
552 * contain dirty pages, so we do not need to lock the vcache and since
553 * afs_putapage will not be called. Instead, forgo the vcache lock and
554 * call pvn_vplist_dirty to free, or invalidate pages. Pass a dummy
555 * page out handler to pvn_vplist_dirty which we do not expect to be
556 * called. Panic if the dummy handler is called, since something went
559 if ((avc
->f
.states
& CRO
) == 0) {
560 ObtainWriteLock(&avc
->lock
, 670);
561 didLock
= WRITE_LOCK
;
564 if ((avc
->f
.states
& CRO
) == 0)
565 code
= pvn_vplist_dirty(vp
, toff
, afs_putapage
, flags
, cred
);
567 code
= pvn_vplist_dirty(vp
, toff
, afs_never_putapage
, flags
, cred
);
571 if (code
&& !avc
->vc_error
) {
573 ObtainWriteLock(&avc
->lock
, 668);
574 didLock
= WRITE_LOCK
;
575 } else if (didLock
== SHARED_LOCK
) {
576 UpgradeSToWLock(&avc
->lock
, 669);
577 didLock
= WRITE_LOCK
;
579 avc
->vc_error
= code
;
582 if (didLock
== WRITE_LOCK
)
583 ReleaseWriteLock(&avc
->lock
);
584 else if (didLock
== SHARED_LOCK
)
585 ReleaseSharedLock(&avc
->lock
);
586 afs_Trace2(afs_iclSetp
, CM_TRACE_PAGEOUTDONE
, ICL_TYPE_LONG
, code
,
587 ICL_TYPE_LONG
, NPages
);
593 afs_putapage(struct vnode
*vp
, struct page
*pages
, u_offset_t
* offp
,
594 size_t * lenp
, int flags
, afs_ucred_t
*credp
)
597 struct vcache
*avc
= VTOAFS(vp
);
599 u_int tlen
= PAGESIZE
;
600 afs_offs_t off
= (pages
->p_offset
/ PAGESIZE
) * PAGESIZE
;
603 * Now we've got the modified pages. All pages are locked and held
604 * XXX Find a kluster that fits in one block (or page). We also
605 * adjust the i/o if the file space is less than a while page. XXX
607 if (off
+ tlen
> avc
->f
.m
.Length
) {
608 tlen
= avc
->f
.m
.Length
- off
;
610 /* can't call mapout with 0 length buffers (rmfree panics) */
611 if (((tlen
>> 24) & 0xff) == 0xff) {
616 * Can't call mapout with 0 length buffers since we'll get rmfree panics
618 tbuf
= pageio_setup(pages
, tlen
, vp
, B_WRITE
| flags
);
623 tbuf
->b_lblkno
= lbtodb(pages
->p_offset
);
626 afs_Trace4(afs_iclSetp
, CM_TRACE_PAGEOUTONE
, ICL_TYPE_LONG
, avc
,
627 ICL_TYPE_LONG
, pages
, ICL_TYPE_LONG
, tlen
, ICL_TYPE_OFFSET
,
628 ICL_HANDLE_OFFSET(off
));
629 code
= afs_ustrategy(tbuf
, credp
); /* unlocks page */
633 pvn_write_done(pages
, ((code
) ? B_ERROR
: 0) | B_WRITE
| flags
);
644 afs_nfsrdwr(struct vcache
*avc
, struct uio
*auio
, enum uio_rw arw
,
645 int ioflag
, afs_ucred_t
*acred
)
649 afs_int32 code_checkcode
= 0;
651 afs_int32 mode
, sflags
;
653 struct dcache
*dcp
, *dcp_newpage
;
654 afs_size_t fileBase
, size
;
657 afs_int32 pageOffset
, extraResid
= 0;
658 afs_size_t origLength
; /* length when reading/writing started */
659 long appendLength
; /* length when this call will finish */
660 int created
; /* created pages instead of faulting them */
662 int didFakeOpen
, eof
;
663 struct vrequest treq
;
667 AFS_STATCNT(afs_nfsrdwr
);
669 /* can't read or write other things */
670 if (vType(avc
) != VREG
)
673 if (auio
->uio_resid
== 0)
676 afs_Trace4(afs_iclSetp
, CM_TRACE_VMRW
, ICL_TYPE_POINTER
, (afs_int32
) avc
,
677 ICL_TYPE_LONG
, (arw
== UIO_WRITE
? 1 : 0), ICL_TYPE_OFFSET
,
678 ICL_HANDLE_OFFSET(auio
->uio_loffset
), ICL_TYPE_OFFSET
,
679 ICL_HANDLE_OFFSET(auio
->uio_resid
));
681 #ifndef AFS_64BIT_CLIENT
682 if (AfsLargeFileUio(auio
)) /* file is larger than 2 GB */
687 osi_Panic("rdwr: !acred");
689 if (code
= afs_InitReq(&treq
, acred
))
692 /* It's not really possible to know if a write cause a growth in the
693 * cache size, we we wait for a cache drain for any write.
695 afs_MaybeWakeupTruncateDaemon();
696 while ((arw
== UIO_WRITE
)
697 && (afs_blocksUsed
> PERCENT(CM_WAITFORDRAINPCT
, afs_cacheBlocks
))) {
698 if (afs_blocksUsed
- afs_blocksDiscarded
>
699 PERCENT(CM_WAITFORDRAINPCT
, afs_cacheBlocks
)) {
700 afs_WaitForCacheDrain
= 1;
701 afs_osi_Sleep(&afs_WaitForCacheDrain
);
703 afs_MaybeFreeDiscardedDCache();
704 afs_MaybeWakeupTruncateDaemon();
706 code
= afs_VerifyVCache(avc
, &treq
);
708 return afs_CheckCode(code
, &treq
, 45);
710 osi_FlushPages(avc
, acred
);
712 ObtainWriteLock(&avc
->lock
, 250);
714 /* adjust parameters when appending files */
715 if ((ioflag
& IO_APPEND
) && arw
== UIO_WRITE
) {
716 auio
->uio_loffset
= avc
->f
.m
.Length
; /* write at EOF position */
718 if (auio
->afsio_offset
< 0 || (auio
->afsio_offset
+ auio
->uio_resid
) < 0) {
719 ReleaseWriteLock(&avc
->lock
);
722 #ifndef AFS_64BIT_CLIENT
723 /* file is larger than 2GB */
724 if (AfsLargeFileSize(auio
->uio_offset
, auio
->uio_resid
)) {
725 ReleaseWriteLock(&avc
->lock
);
730 didFakeOpen
= 0; /* keep track of open so we can do close */
731 if (arw
== UIO_WRITE
) {
732 /* do ulimit processing; shrink resid or fail */
733 if (auio
->uio_loffset
+ auio
->afsio_resid
> auio
->uio_llimit
) {
734 if (auio
->uio_loffset
>= auio
->uio_llimit
) {
735 ReleaseWriteLock(&avc
->lock
);
738 /* track # of bytes we should write, but won't because of
739 * ulimit; we must add this into the final resid value
740 * so caller knows we punted some data.
742 extraResid
= auio
->uio_resid
;
743 auio
->uio_resid
= auio
->uio_llimit
- auio
->uio_loffset
;
744 extraResid
-= auio
->uio_resid
;
747 mode
= S_WRITE
; /* segment map-in mode */
748 afs_FakeOpen(avc
); /* do this for writes, so data gets put back
749 * when we want it to be put back */
750 didFakeOpen
= 1; /* we'll be doing a fake open */
751 /* before starting any I/O, we must ensure that the file is big enough
752 * to hold the results (since afs_putpage will be called to force the I/O */
753 size
= auio
->afsio_resid
+ auio
->afsio_offset
; /* new file size */
755 origLength
= avc
->f
.m
.Length
;
756 if (size
> avc
->f
.m
.Length
) {
757 afs_Trace4(afs_iclSetp
, CM_TRACE_SETLENGTH
, ICL_TYPE_STRING
,
758 __FILE__
, ICL_TYPE_LONG
, __LINE__
, ICL_TYPE_OFFSET
,
759 ICL_HANDLE_OFFSET(avc
->f
.m
.Length
), ICL_TYPE_OFFSET
,
760 ICL_HANDLE_OFFSET(size
));
761 avc
->f
.m
.Length
= size
; /* file grew */
763 avc
->f
.states
|= CDirty
; /* Set the dirty bit */
764 avc
->f
.m
.Date
= osi_Time(); /* Set file date (for ranlib) */
766 mode
= S_READ
; /* map-in read-only */
767 origLength
= avc
->f
.m
.Length
;
770 if (acred
&& AFS_NFSXLATORREQ(acred
)) {
771 if (arw
== UIO_READ
) {
773 (avc
, PRSFS_READ
, &treq
,
774 CHECK_MODE_BITS
| CMB_ALLOW_EXEC_AS_READ
)) {
775 ReleaseWriteLock(&avc
->lock
);
785 counter
= 0; /* don't call afs_DoPartialWrite first time through. */
787 /* compute the amount of data to move into this block,
788 * based on auio->afsio_resid. Note that we copy data in units of
789 * MAXBSIZE, not PAGESIZE. This is because segmap_getmap panics if you
790 * call it with an offset based on blocks smaller than MAXBSIZE
791 * (implying that it should be named BSIZE, since it is clearly both a
793 size
= auio
->afsio_resid
; /* transfer size */
794 fileBase
= ((arw
== UIO_READ
) && (origLength
< auio
->uio_offset
)) ?
795 origLength
: auio
->afsio_offset
; /* start file position for xfr */
796 pageBase
= fileBase
& ~(MAXBSIZE
- 1); /* file position of the page */
797 pageOffset
= fileBase
& (MAXBSIZE
- 1); /* xfr start's offset within page */
798 tsize
= MAXBSIZE
- pageOffset
; /* how much more fits in this page */
799 /* we'll read tsize bytes, but first must make sure tsize isn't too big */
801 tsize
= size
; /* don't read past end of request */
802 eof
= 0; /* flag telling us if we hit the EOF on the read */
803 if (arw
== UIO_READ
) { /* we're doing a read operation */
804 /* don't read past EOF */
805 if (fileBase
+ tsize
> origLength
) {
806 tsize
= origLength
- fileBase
;
807 eof
= 1; /* we did hit the EOF */
809 tsize
= 0; /* better safe than sorry */
813 /* Purge dirty chunks of file if there are too many dirty
814 * chunks. Inside the write loop, we only do this at a chunk
815 * boundary. Clean up partial chunk if necessary at end of loop.
817 if (counter
> 0 && code
== 0 && AFS_CHUNKOFFSET(fileBase
) == 0) {
818 code
= afs_DoPartialWrite(avc
, &treq
);
822 /* write case, we ask segmap_release to call putpage. Really, we
823 * don't have to do this on every page mapin, but for now we're
824 * lazy, and don't modify the rest of AFS to scan for modified
825 * pages on a close or other "synchronize with file server"
826 * operation. This makes things a little cleaner, but probably
827 * hurts performance. */
832 break; /* nothing to transfer, we're done */
834 if (arw
== UIO_WRITE
)
835 avc
->f
.states
|= CDirty
; /* may have been cleared by DoPartialWrite */
837 /* Before dropping lock, hold the chunk (create it if necessary). This
838 * serves two purposes: (1) Ensure Cache Truncate Daemon doesn't try
839 * to purge the chunk's pages while we have them locked. This would
840 * cause deadlock because we might be waiting for the CTD to free up
841 * a chunk. (2) If we're writing past the original EOF, and we're
842 * at the base of the chunk, then make sure it exists online
843 * before we do the uiomove, since the segmap_release will
844 * write out to the chunk, causing it to get fetched if it hasn't
845 * been created yet. The code that would otherwise notice that
846 * we're fetching a chunk past EOF won't work, since we've
847 * already adjusted the file size above.
849 ObtainWriteLock(&avc
->vlock
, 551);
850 while (avc
->vstates
& VPageCleaning
) {
851 ReleaseWriteLock(&avc
->vlock
);
852 ReleaseWriteLock(&avc
->lock
);
853 afs_osi_Sleep(&avc
->vstates
);
854 ObtainWriteLock(&avc
->lock
, 334);
855 ObtainWriteLock(&avc
->vlock
, 552);
857 ReleaseWriteLock(&avc
->vlock
);
859 afs_size_t toff
, tlen
;
860 dcp
= afs_GetDCache(avc
, fileBase
, &treq
, &toff
, &tlen
, 2);
866 ReleaseWriteLock(&avc
->lock
); /* uiomove may page fault */
868 data
= segmap_getmap(segkmap
, AFSTOV(avc
), (u_offset_t
) pageBase
);
869 raddr
= (caddr_t
) (((uintptr_t) data
+ pageOffset
) & PAGEMASK
);
871 (((u_int
) data
+ pageOffset
+ tsize
+ PAGEOFFSET
) & PAGEMASK
) -
874 /* if we're doing a write, and we're starting at the rounded
875 * down page base, and we're writing enough data to cover all
876 * created pages, then we must be writing all of the pages
877 * in this MAXBSIZE window that we're creating.
880 if (arw
== UIO_WRITE
&& ((long)raddr
== (long)data
+ pageOffset
)
882 /* probably the dcache backing this guy is around, but if
883 * not, we can't do this optimization, since we're creating
884 * writable pages, which must be backed by a chunk.
887 dcp_newpage
= afs_FindDCache(avc
, pageBase
);
889 && hsame(avc
->f
.m
.DataVersion
, dcp_newpage
->f
.versionNo
)) {
890 ObtainWriteLock(&avc
->lock
, 251);
891 ObtainWriteLock(&avc
->vlock
, 576);
892 ObtainReadLock(&dcp_newpage
->lock
);
893 if ((avc
->activeV
== 0)
894 && hsame(avc
->f
.m
.DataVersion
, dcp_newpage
->f
.versionNo
)
895 && !(dcp_newpage
->dflags
& (DFFetching
))) {
897 segmap_pagecreate(segkmap
, raddr
, rsize
, 1);
899 ObtainWriteLock(&afs_xdcache
, 252);
900 /* Mark the pages as created and dirty */
901 afs_indexFlags
[dcp_newpage
->index
]
902 |= (IFAnyPages
| IFDirtyPages
);
903 ReleaseWriteLock(&afs_xdcache
);
906 ReleaseReadLock(&dcp_newpage
->lock
);
907 afs_PutDCache(dcp_newpage
);
908 ReleaseWriteLock(&avc
->vlock
);
909 ReleaseWriteLock(&avc
->lock
);
910 } else if (dcp_newpage
)
911 afs_PutDCache(dcp_newpage
);
916 afs_fc2errno(segmap_fault
917 (kas
.a_hat
, segkmap
, raddr
, rsize
,
921 AFS_UIOMOVE(data
+ pageOffset
, tsize
, arw
, auio
, code
);
922 segmap_fault(kas
.a_hat
, segkmap
, raddr
, rsize
, F_SOFTUNLOCK
,
926 code
= segmap_release(segkmap
, data
, sflags
);
928 (void)segmap_release(segkmap
, data
, 0);
931 ObtainWriteLock(&avc
->lock
, 253);
939 afs_FakeClose(avc
, acred
);
941 if (arw
== UIO_WRITE
&& (avc
->f
.states
& CDirty
)) {
942 code2
= afs_DoPartialWrite(avc
, &treq
);
947 if (!code
&& avc
->vc_error
) {
948 code
= code_checkcode
= avc
->vc_error
;
950 ReleaseWriteLock(&avc
->lock
);
952 if ((ioflag
& FSYNC
) && (arw
== UIO_WRITE
)
953 && !AFS_NFSXLATORREQ(acred
))
954 code
= afs_fsync(avc
, 0, acred
);
957 * If things worked, add in as remaining in request any bytes
958 * we didn't write due to file size ulimit.
960 if (code
== 0 && extraResid
> 0)
961 auio
->uio_resid
+= extraResid
;
962 if (code_checkcode
) {
963 return code_checkcode
;
965 return afs_CheckCode(code
, &treq
, 46);
970 afs_map(struct vnode
*vp
, offset_t off
, struct as
*as
, caddr_t
*addr
, size_t len
, u_char prot
, u_char maxprot
, u_int flags
, afs_ucred_t
*cred
)
972 struct segvn_crargs crargs
;
974 struct vrequest treq
;
975 struct vcache
*avc
= VTOAFS(vp
);
977 AFS_STATCNT(afs_map
);
980 /* check for reasonableness on segment bounds; apparently len can be < 0 */
981 if (off
< 0 || off
+ len
< 0) {
984 #ifndef AFS_64BIT_CLIENT
985 if (AfsLargeFileSize(off
, len
)) { /* file is larger than 2 GB */
991 if (vp
->v_flag
& VNOMAP
) /* File isn't allowed to be mapped */
994 if (vp
->v_filocks
) /* if locked, disallow mapping */
998 if (code
= afs_InitReq(&treq
, cred
))
1001 if (vp
->v_type
!= VREG
) {
1006 code
= afs_VerifyVCache(avc
, &treq
);
1010 osi_FlushPages(avc
, cred
); /* ensure old pages are gone */
1011 avc
->f
.states
|= CMAPPED
; /* flag cleared at afs_inactive */
1015 if ((flags
& MAP_FIXED
) == 0) {
1016 #ifdef MAPADDR_LACKS_VACALIGN
1017 map_addr(addr
, len
, off
, flags
);
1019 map_addr(addr
, len
, off
, 1, flags
);
1021 if (*addr
== NULL
) {
1027 (void)as_unmap(as
, *addr
, len
); /* unmap old address space use */
1028 /* setup the create parameter block for the call */
1029 crargs
.vp
= AFSTOV(avc
);
1030 crargs
.offset
= (u_offset_t
)off
;
1032 crargs
.type
= flags
& MAP_TYPE
;
1034 crargs
.maxprot
= maxprot
;
1035 crargs
.amp
= (struct anon_map
*)0;
1036 crargs
.flags
= flags
& ~MAP_TYPE
;
1038 code
= as_map(as
, *addr
, len
, segvn_create
, (char *)&crargs
);
1042 code
= afs_CheckCode(code
, &treq
, 47);
1046 code
= afs_CheckCode(code
, &treq
, 48);
1053 * For Now We use standard local kernel params for AFS system values. Change this
1057 #ifdef AFS_SUN511_ENV
1058 afs_pathconf(struct vnode
*vp
, int cmd
, u_long
*outdatap
,
1059 afs_ucred_t
*credp
, caller_context_t
*ct
)
1061 afs_pathconf(struct vnode
*vp
, int cmd
, u_long
*outdatap
,
1063 #endif /* AFS_SUN511_ENV */
1065 AFS_STATCNT(afs_cntl
);
1068 *outdatap
= MAXLINK
;
1071 *outdatap
= MAXNAMLEN
;
1074 *outdatap
= MAXPATHLEN
;
1076 case _PC_CHOWN_RESTRICTED
:
1082 case _PC_FILESIZEBITS
:
1083 #ifdef AFS_64BIT_CLIENT
1090 #ifdef AFS_SUN511_ENV
1091 return fs_pathconf(vp
, cmd
, outdatap
, credp
, ct
);
1093 return fs_pathconf(vp
, cmd
, outdatap
, credp
);
1094 #endif /* AFS_SUN511_ENV */
1100 afs_ioctl(struct vnode
*vnp
, int com
, int arg
, int flag
, cred_t
*credp
,
1107 afs_rwlock(struct vnode
*vnp
, int wlock
)
1109 rw_enter(&(VTOAFS(vnp
))->rwlock
, (wlock
? RW_WRITER
: RW_READER
));
1114 afs_rwunlock(struct vnode
*vnp
, int wlock
)
1116 rw_exit(&(VTOAFS(vnp
))->rwlock
);
1122 afs_seek(struct vnode
*vnp
, offset_t ooff
, offset_t
*noffp
)
1126 #ifndef AFS_64BIT_CLIENT
1127 # define __MAXOFF_T MAXOFF_T
1129 # define __MAXOFF_T MAXOFFSET_T
1132 if ((*noffp
< 0 || *noffp
> __MAXOFF_T
))
1138 #ifdef AFS_SUN59_ENV
1139 afs_frlock(struct vnode
*vnp
, int cmd
, struct flock64
*ap
, int flag
,
1140 offset_t off
, struct flk_callback
*flkcb
, afs_ucred_t
*credp
)
1142 afs_frlock(struct vnode
*vnp
, int cmd
, struct flock64
*ap
, int flag
,
1143 offset_t off
, afs_ucred_t
*credp
)
1148 * Implement based on afs_lockctl
1151 #ifdef AFS_SUN59_ENV
1153 afs_warn("Don't know how to deal with flk_callback's!\n");
1155 if ((cmd
== F_GETLK
) || (cmd
== F_O_GETLK
) || (cmd
== F_SETLK
)
1156 || (cmd
== F_SETLKW
)) {
1157 ap
->l_pid
= ttoproc(curthread
)->p_pid
;
1161 code
= convoff(vnp
, ap
, 0, off
);
1167 code
= afs_lockctl(VTOAFS(vnp
), ap
, cmd
, credp
);
1174 afs_space(struct vnode
*vnp
, int cmd
, struct flock64
*ap
, int flag
,
1175 offset_t off
, afs_ucred_t
*credp
)
1177 afs_int32 code
= EINVAL
;
1180 if ((cmd
== F_FREESP
)
1181 && ((code
= convoff(vnp
, ap
, 0, off
)) == 0)) {
1184 vattr
.va_mask
= AT_SIZE
;
1185 vattr
.va_size
= ap
->l_start
;
1186 code
= afs_setattr(VTOAFS(vnp
), &vattr
, 0, credp
);
1194 afs_dump(struct vnode
*vp
, caddr_t addr
, int i1
, int i2
)
1196 AFS_STATCNT(afs_dump
);
1197 afs_warn("AFS_DUMP. MUST IMPLEMENT THIS!!!\n");
1202 /* Nothing fancy here; just compare if vnodes are identical ones */
1204 afs_cmp(struct vnode
*vp1
, struct vnode
*vp2
)
1206 AFS_STATCNT(afs_cmp
);
1207 return (vp1
== vp2
);
1212 afs_realvp(struct vnode
*vp
, struct vnode
**vpp
)
1214 AFS_STATCNT(afs_realvp
);
1220 afs_pageio(struct vnode
*vp
, struct page
*pp
, u_int ui1
, u_int ui2
, int i1
,
1223 afs_warn("afs_pageio: Not implemented\n");
1228 #ifdef AFS_SUN59_ENV
1229 afs_dumpctl(struct vnode
*vp
, int i
, int *blkp
)
1231 afs_dumpctl(struct vnode
*vp
, int i
)
1234 afs_warn("afs_dumpctl: Not implemented\n");
1238 #ifdef AFS_SUN511_ENV
1240 afs_dispose(struct vnode
*vp
, struct page
*p
, int fl
, int dn
, struct cred
*cr
, struct caller_context_t
*ct
)
1242 fs_dispose(vp
, p
, fl
, dn
, cr
,ct
);
1246 afs_setsecattr(struct vnode
*vp
, vsecattr_t
*vsecattr
, int flag
, struct cred
*creds
, struct caller_context_t
*ct
)
1252 afs_getsecattr(struct vnode
*vp
, vsecattr_t
*vsecattr
, int flag
, struct cred
*creds
, struct caller_context_t
*ct
)
1254 return fs_fab_acl(vp
, vsecattr
, flag
, creds
,ct
);
1258 afs_dispose(struct vnode
*vp
, struct page
*p
, int fl
, int dn
, struct cred
*cr
)
1260 fs_dispose(vp
, p
, fl
, dn
, cr
);
1264 afs_setsecattr(struct vnode
*vp
, vsecattr_t
*vsecattr
, int flag
,
1271 afs_getsecattr(struct vnode
*vp
, vsecattr_t
*vsecattr
, int flag
, struct cred
*creds
)
1273 return fs_fab_acl(vp
, vsecattr
, flag
, creds
);
1277 #ifdef AFS_GLOBAL_SUNLOCK
1280 gafs_open(struct vnode
**vpp
, afs_int32 aflags
,
1284 struct vcache
*avc
= VTOAFS(*vpp
);
1287 code
= afs_open(&avc
, aflags
, acred
);
1290 /* afs_open currently never changes avc, but just in case... */
1297 gafs_close(struct vnode
*vp
, afs_int32 aflags
, int count
,
1298 offset_t offset
, afs_ucred_t
*acred
)
1302 code
= afs_close(VTOAFS(vp
), aflags
, count
, offset
, acred
);
1308 gafs_getattr(struct vnode
*vp
, struct vattr
*attrs
,
1309 int flags
, afs_ucred_t
*acred
)
1313 code
= afs_getattr(VTOAFS(vp
), attrs
, flags
, acred
);
1320 gafs_setattr(struct vnode
*vp
, struct vattr
*attrs
,
1321 int flags
, afs_ucred_t
*acred
)
1325 code
= afs_setattr(VTOAFS(vp
), attrs
, flags
, acred
);
1332 gafs_access(struct vnode
*vp
, afs_int32 amode
, int flags
,
1337 code
= afs_access(VTOAFS(vp
), amode
, flags
, acred
);
1344 gafs_lookup(struct vnode
*dvp
, char *aname
,
1345 struct vnode
**vpp
, struct pathname
*pnp
, int flags
,
1346 struct vnode
*rdir
, afs_ucred_t
*acred
)
1349 struct vcache
*tvc
= NULL
;
1352 code
= afs_lookup(VTOAFS(dvp
), aname
, &tvc
, pnp
, flags
, rdir
, acred
);
1365 gafs_create(struct vnode
*dvp
, char *aname
, struct vattr
*attrs
,
1366 enum vcexcl aexcl
, int amode
, struct vnode
**vpp
,
1370 struct vcache
*tvc
= NULL
;
1373 code
= afs_create(VTOAFS(dvp
), aname
, attrs
, aexcl
, amode
, &tvc
, acred
);
1385 gafs_remove(struct vnode
*vp
, char *aname
, afs_ucred_t
*acred
)
1389 code
= afs_remove(VTOAFS(vp
), aname
, acred
);
1395 gafs_link(struct vnode
*dvp
, struct vnode
*svp
,
1396 char *aname
, afs_ucred_t
*acred
)
1400 code
= afs_link(VTOAFS(dvp
), VTOAFS(svp
), aname
, acred
);
1406 gafs_rename(struct vnode
*odvp
, char *aname1
,
1407 struct vnode
*ndvp
, char *aname2
,
1411 struct vcache
*aodp
= VTOAFS(odvp
);
1412 struct vcache
*andp
= VTOAFS(ndvp
);
1415 code
= afs_rename(aodp
, aname1
, andp
, aname2
, acred
);
1416 #ifdef AFS_SUN510_ENV
1418 struct vcache
*avcp
= NULL
;
1420 (void) afs_lookup(andp
, aname2
, &avcp
, NULL
, 0, NULL
, acred
);
1422 struct vnode
*vp
= AFSTOV(avcp
), *pvp
= AFSTOV(andp
);
1424 # ifdef HAVE_VN_RENAMEPATH
1425 vn_renamepath(pvp
, vp
, aname2
, strlen(aname2
));
1427 mutex_enter(&vp
->v_lock
);
1428 if (vp
->v_path
!= NULL
) {
1429 kmem_free(vp
->v_path
, strlen(vp
->v_path
) + 1);
1432 mutex_exit(&vp
->v_lock
);
1433 vn_setpath(afs_globalVp
, pvp
, vp
, aname2
, strlen(aname2
));
1434 # endif /* !HAVE_VN_RENAMEPATH */
1436 AFS_RELE(AFSTOV(avcp
));
1445 gafs_mkdir(struct vnode
*dvp
, char *aname
, struct vattr
*attrs
,
1446 struct vnode
**vpp
, afs_ucred_t
*acred
)
1449 struct vcache
*tvc
= NULL
;
1452 code
= afs_mkdir(VTOAFS(dvp
), aname
, attrs
, &tvc
, acred
);
1464 gafs_rmdir(struct vnode
*vp
, char *aname
, struct vnode
*cdirp
,
1469 code
= afs_rmdir(VTOAFS(vp
), aname
, cdirp
, acred
);
1476 gafs_readdir(struct vnode
*vp
, struct uio
*auio
,
1477 afs_ucred_t
*acred
, int *eofp
)
1481 code
= afs_readdir(VTOAFS(vp
), auio
, acred
, eofp
);
1487 gafs_symlink(struct vnode
*vp
, char *aname
, struct vattr
*attrs
,
1488 char *atargetName
, afs_ucred_t
*acred
)
1492 code
= afs_symlink(VTOAFS(vp
), aname
, attrs
, atargetName
, NULL
, acred
);
1499 gafs_readlink(struct vnode
*vp
, struct uio
*auio
, afs_ucred_t
*acred
)
1503 code
= afs_readlink(VTOAFS(vp
), auio
, acred
);
1509 gafs_fsync(struct vnode
*vp
, int flag
, afs_ucred_t
*acred
)
1513 code
= afs_fsync(VTOAFS(vp
), flag
, acred
);
1519 afs_inactive(struct vcache
*avc
, afs_ucred_t
*acred
)
1521 struct vnode
*vp
= AFSTOV(avc
);
1522 if (afs_shuttingdown
!= AFS_RUNNING
)
1526 * In Solaris and HPUX s800 and HP-UX10.0 they actually call us with
1527 * v_count 1 on last reference!
1529 mutex_enter(&vp
->v_lock
);
1530 if (avc
->vrefCount
<= 0)
1531 osi_Panic("afs_inactive : v_count <=0\n");
1534 * If more than 1 don't unmap the vnode but do decrement the ref count
1537 if (vp
->v_count
> 0) {
1538 mutex_exit(&vp
->v_lock
);
1541 mutex_exit(&vp
->v_lock
);
1543 #ifndef AFS_SUN511_ENV
1545 * Solaris calls VOP_OPEN on exec, but doesn't call VOP_CLOSE when
1546 * the executable exits. So we clean up the open count here.
1548 * Only do this for AFS_MVSTAT_FILE vnodes: when using fakestat, we can't
1549 * lose the open count for volume roots (AFS_MVSTAT_ROOT), even though they
1550 * will get VOP_INACTIVE'd when released by afs_PutFakeStat().
1552 if (avc
->opens
> 0 && avc
->mvstat
== AFS_MVSTAT_FILE
&& !(avc
->f
.states
& CCore
))
1553 avc
->opens
= avc
->execsOrWriters
= 0;
1556 afs_InactiveVCache(avc
, acred
);
1559 /* VFS_RELE must be called outside of GLOCK, since it can potentially
1560 * call afs_freevfs, which acquires GLOCK */
1561 VFS_RELE(afs_globalVFS
);
1568 gafs_inactive(struct vnode
*vp
, afs_ucred_t
*acred
)
1571 (void)afs_inactive(VTOAFS(vp
), acred
);
/* VOP_FID wrapper: take GLOCK around afs_fid(). */
int
gafs_fid(struct vnode *vp, struct fid **fidpp)
{
    int code;

    AFS_GLOCK();
    code = afs_fid(VTOAFS(vp), fidpp);
    AFS_GUNLOCK();
    return code;
}
1586 #if defined(AFS_SUN511_ENV)
1587 /* The following list must always be NULL-terminated */
1588 const fs_operation_def_t afs_vnodeops_template
[] = {
1589 VOPNAME_OPEN
, { .vop_open
= gafs_open
},
1590 VOPNAME_CLOSE
, { .vop_close
= gafs_close
},
1591 VOPNAME_READ
, { .vop_read
= afs_vmread
},
1592 VOPNAME_WRITE
, { .vop_write
= afs_vmwrite
},
1593 VOPNAME_IOCTL
, { .vop_ioctl
= afs_ioctl
},
1594 VOPNAME_SETFL
, { .vop_setfl
= fs_setfl
},
1595 VOPNAME_GETATTR
, { .vop_getattr
= gafs_getattr
},
1596 VOPNAME_SETATTR
, { .vop_setattr
= gafs_setattr
},
1597 VOPNAME_ACCESS
, { .vop_access
= gafs_access
},
1598 VOPNAME_LOOKUP
, { .vop_lookup
= gafs_lookup
},
1599 VOPNAME_CREATE
, { .vop_create
= gafs_create
},
1600 VOPNAME_REMOVE
, { .vop_remove
= gafs_remove
},
1601 VOPNAME_LINK
, { .vop_link
= gafs_link
},
1602 VOPNAME_RENAME
, { .vop_rename
= gafs_rename
},
1603 VOPNAME_MKDIR
, { .vop_mkdir
= gafs_mkdir
},
1604 VOPNAME_RMDIR
, { .vop_rmdir
= gafs_rmdir
},
1605 VOPNAME_READDIR
, { .vop_readdir
= gafs_readdir
},
1606 VOPNAME_SYMLINK
, { .vop_symlink
= gafs_symlink
},
1607 VOPNAME_READLINK
, { .vop_readlink
= gafs_readlink
},
1608 VOPNAME_FSYNC
, { .vop_fsync
= gafs_fsync
},
1609 VOPNAME_INACTIVE
, { .vop_inactive
= gafs_inactive
},
1610 VOPNAME_FID
, { .vop_fid
= gafs_fid
},
1611 VOPNAME_RWLOCK
, { .vop_rwlock
= afs_rwlock
},
1612 VOPNAME_RWUNLOCK
, { .vop_rwunlock
= afs_rwunlock
},
1613 VOPNAME_SEEK
, { .vop_seek
= afs_seek
},
1614 VOPNAME_CMP
, { .vop_cmp
= afs_cmp
},
1615 VOPNAME_FRLOCK
, { .vop_frlock
= afs_frlock
},
1616 VOPNAME_SPACE
, { .vop_space
= afs_space
},
1617 VOPNAME_REALVP
, { .vop_realvp
= afs_realvp
},
1618 VOPNAME_GETPAGE
, { .vop_getpage
= afs_getpage
},
1619 VOPNAME_PUTPAGE
, { .vop_putpage
= afs_putpage
},
1620 VOPNAME_MAP
, { .vop_map
= afs_map
},
1621 VOPNAME_ADDMAP
, { .vop_addmap
= afs_addmap
},
1622 VOPNAME_DELMAP
, { .vop_delmap
= afs_delmap
},
1623 VOPNAME_POLL
, { .vop_poll
= fs_poll
},
1624 VOPNAME_PATHCONF
, { .vop_pathconf
= afs_pathconf
},
1625 VOPNAME_PAGEIO
, { .vop_pageio
= afs_pageio
},
1626 VOPNAME_DUMP
, { .vop_dump
= afs_dump
},
1627 VOPNAME_DUMPCTL
, { .vop_dumpctl
= afs_dumpctl
},
1628 VOPNAME_DISPOSE
, { .vop_dispose
= afs_dispose
},
1629 VOPNAME_GETSECATTR
, { .vop_getsecattr
= afs_getsecattr
},
1630 VOPNAME_SETSECATTR
, { .vop_setsecattr
= afs_setsecattr
},
1631 VOPNAME_SHRLOCK
, { .vop_shrlock
= fs_shrlock
},
1634 vnodeops_t
*afs_ops
;
1635 #elif defined(AFS_SUN510_ENV)
1636 /* The following list must always be NULL-terminated */
1637 const fs_operation_def_t afs_vnodeops_template
[] = {
1638 VOPNAME_OPEN
, gafs_open
,
1639 VOPNAME_CLOSE
, gafs_close
,
1640 VOPNAME_READ
, afs_vmread
,
1641 VOPNAME_WRITE
, afs_vmwrite
,
1642 VOPNAME_IOCTL
, afs_ioctl
,
1643 VOPNAME_SETFL
, fs_setfl
,
1644 VOPNAME_GETATTR
, gafs_getattr
,
1645 VOPNAME_SETATTR
, gafs_setattr
,
1646 VOPNAME_ACCESS
, gafs_access
,
1647 VOPNAME_LOOKUP
, gafs_lookup
,
1648 VOPNAME_CREATE
, gafs_create
,
1649 VOPNAME_REMOVE
, gafs_remove
,
1650 VOPNAME_LINK
, gafs_link
,
1651 VOPNAME_RENAME
, gafs_rename
,
1652 VOPNAME_MKDIR
, gafs_mkdir
,
1653 VOPNAME_RMDIR
, gafs_rmdir
,
1654 VOPNAME_READDIR
, gafs_readdir
,
1655 VOPNAME_SYMLINK
, gafs_symlink
,
1656 VOPNAME_READLINK
, gafs_readlink
,
1657 VOPNAME_FSYNC
, gafs_fsync
,
1658 VOPNAME_INACTIVE
, gafs_inactive
,
1659 VOPNAME_FID
, gafs_fid
,
1660 VOPNAME_RWLOCK
, afs_rwlock
,
1661 VOPNAME_RWUNLOCK
, afs_rwunlock
,
1662 VOPNAME_SEEK
, afs_seek
,
1663 VOPNAME_CMP
, afs_cmp
,
1664 VOPNAME_FRLOCK
, afs_frlock
,
1665 VOPNAME_SPACE
, afs_space
,
1666 VOPNAME_REALVP
, afs_realvp
,
1667 VOPNAME_GETPAGE
, afs_getpage
,
1668 VOPNAME_PUTPAGE
, afs_putpage
,
1669 VOPNAME_MAP
, afs_map
,
1670 VOPNAME_ADDMAP
, afs_addmap
,
1671 VOPNAME_DELMAP
, afs_delmap
,
1672 VOPNAME_POLL
, fs_poll
,
1673 VOPNAME_DUMP
, afs_dump
,
1674 VOPNAME_PATHCONF
, afs_pathconf
,
1675 VOPNAME_PAGEIO
, afs_pageio
,
1676 VOPNAME_DUMPCTL
, afs_dumpctl
,
1677 VOPNAME_DISPOSE
, afs_dispose
,
1678 VOPNAME_GETSECATTR
, afs_getsecattr
,
1679 VOPNAME_SETSECATTR
, afs_setsecattr
,
1680 VOPNAME_SHRLOCK
, fs_shrlock
,
1683 struct vnodeops
*afs_ops
;
/* NOTE(review): pre-Solaris-10 positional vnodeops table.  The entire
 * initializer body (original lines 1686-1729) is not visible in this
 * view, and its entries must match the member order of struct vnodeops
 * in <sys/vnode.h> exactly -- restore it from the repository copy
 * rather than reconstructing it. */
1685 struct vnodeops Afs_vnodeops
= {
1730 struct vnodeops
*afs_ops
= &Afs_vnodeops
;
1733 #endif /* AFS_GLOBAL_SUNLOCK */