/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/*
 * osi_vfsops.c for IRIX
 */
#include <afsconfig.h>
#include "afs/param.h"


#include "afs/sysincludes.h"	/* Standard vendor system headers */
#include "afsincludes.h"	/* Afs-based standard headers */
#include "afs/afs_stats.h"	/* statistics stuff */
#include "sys/syssgi.h"


struct vfs *afs_globalVFS = 0;
struct vcache *afs_globalVp = 0;

#ifdef AFS_SGI_VNODE_GLUE
#include <sys/invent.h>
int afs_is_numa_arch;
mutex_t afs_init_kern_lock;
#endif


#define SYS_setgroups SGI_SETGROUPS

int afs_fstype;
lock_t afs_rxlock;

#include "sys/mload.h"
char *Afs_mversion = M_VERSION;

extern int (*setgroupsp) (int, gid_t *);
extern struct afs_lock afs_xvcache;
extern int idbg_afsuser();
extern void afs_mpservice(void *);

/*
 * AFS fs initialization - we also plug system calls here
 */
#define NewSystemCall(n,f,a) \
    syscallsw[ABI_IRIX5].sc_sysent[(n)-1000].sy_narg = a; \
    syscallsw[ABI_IRIX5].sc_sysent[(n)-1000].sy_call = f; \
    syscallsw[ABI_IRIX5].sc_sysent[(n)-1000].sy_flags = 0;
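/*
 * NewSystemCall() patches an entry in the kernel's IRIX5 ABI system call
 * table: syscall number n (offset by 1000 into sc_sysent[]) is pointed at
 * handler f taking a arguments.  This is how the AFS syscall, pioctl,
 * setpag and the inode syscalls below are made visible to user space.
 */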
extern struct vfsops Afs_vfsops, *afs_vfsopsp;
extern struct vnodeops Afs_vnodeops, *afs_vnodeopsp;
extern void (*afsidestroyp) (struct inode *);
extern void afsidestroy(struct inode *);
extern int (*idbg_prafsnodep) (vnode_t *);
extern int (*idbg_afsvfslistp) (void);
extern int idbg_prafsnode(vnode_t *);
extern int idbg_afsvfslist(void);


int
Afs_init(struct vfssw *vswp, int fstype)
{
    extern int Afs_syscall(), Afs_xsetgroups(), afs_pioctl(), afs_setpag();
    extern int icreate(), iopen(), iinc(), idec();
#ifdef AFS_SGI_XFS_IOPS_ENV
    extern int iopen64();
#else
    extern int iread(), iwrite();
#endif

    AFS_STATCNT(afsinit);
    osi_Init();
    afs_fstype = fstype;

#ifdef AFS_SGI_VNODE_GLUE
    /* Synchronize doing NUMA test. */
    mutex_init(&afs_init_kern_lock, MUTEX_DEFAULT, "init_kern_lock");
#endif
    /*
     * set up pointers from main kernel into us
     */
    afs_vnodeopsp = &Afs_vnodeops;
    afs_vfsopsp = &Afs_vfsops;
    afsidestroyp = afsidestroy;
    idbg_prafsnodep = idbg_prafsnode;
    idbg_afsvfslistp = idbg_afsvfslist;
    NewSystemCall(AFS_SYSCALL, Afs_syscall, 6);
    NewSystemCall(AFS_PIOCTL, afs_pioctl, 4);
    NewSystemCall(AFS_SETPAG, afs_setpag, 0);
    NewSystemCall(AFS_IOPEN, iopen, 3);
    NewSystemCall(AFS_ICREATE, icreate, 6);
    NewSystemCall(AFS_IINC, iinc, 3);
    NewSystemCall(AFS_IDEC, idec, 3);
#ifdef AFS_SGI_XFS_IOPS_ENV
    NewSystemCall(AFS_IOPEN64, iopen64, 4);
#else
    NewSystemCall(AFS_IREAD, iread, 6);
    NewSystemCall(AFS_IWRITE, iwrite, 6);
#endif

    /* replace these last */
    setgroupsp = Afs_xsetgroups;

    idbg_addfunc("afsuser", idbg_afsuser);
    return (0);
}


extern int afs_mount(), afs_unmount(), afs_root(), afs_statfs();
#ifdef AFS_SGI65_ENV
extern int afs_sync(OSI_VFS_DECL(afsp), int flags, struct cred *cr);
#else
extern int afs_sync(OSI_VFS_DECL(afsp), short flags, struct cred *cr);
#endif
extern int afs_vget(OSI_VFS_DECL(afsp), vnode_t ** vpp, struct fid *afidp);
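/*
 * vfs entry points handed to the kernel.  Under MP this table is named
 * afs_lockedvfsops and the mp_afs_* wrappers at the end of this file are
 * exported instead; otherwise it is installed directly as Afs_vfsops.
 */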
#ifdef MP
struct vfsops afs_lockedvfsops =
#else
struct vfsops Afs_vfsops =
#endif
{
#ifdef AFS_SGI64_ENV
#ifdef AFS_SGI65_ENV
    BHV_IDENTITY_INIT_POSITION(VFS_POSITION_BASE),
#else
    VFS_POSITION_BASE,
#endif
#endif
    afs_mount,
#ifdef AFS_SGI64_ENV
    fs_nosys,			/* rootinit */
    fs_nosys,			/* mntupdate */
    fs_dounmount,
#endif
    afs_unmount,
    afs_root,
    afs_statfs,
    afs_sync,
    afs_vget,
    fs_nosys,			/* mountroot */
#ifdef AFS_SGI65_ENV
    fs_nosys,			/* realvfsops */
    fs_import,			/* import */
    fs_nosys,			/* quotactl */
#else
    fs_nosys,			/* swapvp */
#endif
};
extern struct afs_q VLRU;	/* vcache LRU */

#ifdef AFS_SGI64_ENV
static bhv_desc_t afs_vfs_bhv;
#endif
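/*
 * Mount entry point: requires superuser, insists the mount point is a
 * directory, and refuses a second mount while afs_globalVFS is set.  On
 * success it records the vfs, fills in block size, fsid and fstype, and
 * (on SGI64) inserts the AFS behavior descriptor.
 */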
afs_mount(struct vfs *afsp, vnode_t * mvp, struct mounta *uap,
#ifdef AFS_SGI65_ENV
	  char *attrs,
#endif
	  cred_t * cr)
{
    AFS_STATCNT(afs_mount);

    if (!suser())
	return EPERM;

    if (mvp->v_type != VDIR)
	return ENOTDIR;

    if (afs_globalVFS) {	/* Don't allow remounts. */
	return EBUSY;
    }

    afs_globalVFS = afsp;
    afsp->vfs_bsize = 8192;
    afsp->vfs_fsid.val[0] = AFS_VFSMAGIC;	/* magic */
    afsp->vfs_fsid.val[1] = afs_fstype;
#ifdef AFS_SGI64_ENV
    vfs_insertbhv(afsp, &afs_vfs_bhv, &Afs_vfsops, &afs_vfs_bhv);
#else
    afsp->vfs_data = NULL;
#endif
    afsp->vfs_fstype = afs_fstype;
    afsp->vfs_dev = 0xbabebabe;	/* XXX this should be unique */

    return 0;
}

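/*
 * Unmount: flush every vcache on the VLRU (restarting if a flush slept),
 * deferring the root vnode.  If the root is still open or locked we fail
 * with EBUSY; otherwise drop its reference, flush it too, clear
 * afs_globalVFS and shut the cache manager down.
 */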
afs_unmount(OSI_VFS_ARG(afsp), flags, cr)
OSI_VFS_DECL(afsp);
int flags;
cred_t *cr;
{
    struct vcache *tvc;
    vnode_t *vp, *rootvp = NULL;
    struct afs_q *tq;
    struct afs_q *uq;
    int error, fv_slept;
    OSI_VFS_CONVERT(afsp);

    AFS_STATCNT(afs_unmount);

    if (!suser())
	return EPERM;

    /*
     * flush all pages from inactive vnodes - return
     * EBUSY if any still in use
     */
    ObtainWriteLock(&afs_xvcache, 172);
  retry:
    for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
	tvc = QTOV(tq);
	uq = QPrev(tq);
	vp = (vnode_t *) tvc;
	if ((error = afs_FlushVCache(tvc, &fv_slept))) {
	    if (vp->v_flag & VROOT) {
		rootvp = vp;
		continue;
	    } else {
		ReleaseWriteLock(&afs_xvcache);
		return error;
	    }
	}
	if (fv_slept) {
	    goto retry;
	}
    }

    /*
     * rootvp gets lots of ref counts
     */
    if (rootvp) {
	tvc = VTOAFS(rootvp);
	if (tvc->opens || CheckLock(&tvc->lock) || LockWaiters(&tvc->lock)) {
	    ReleaseWriteLock(&afs_xvcache);
	    return EBUSY;
	}
	ReleaseWriteLock(&afs_xvcache);
	rootvp->v_count = 1;
	AFS_RELE(rootvp);
	ObtainWriteLock(&afs_xvcache, 173);
	afs_FlushVCache(tvc, &fv_slept);
    }
    ReleaseWriteLock(&afs_xvcache);
    afs_globalVFS = 0;
    afs_shutdown();
#ifdef AFS_SGI65_ENV
    VFS_REMOVEBHV(afsp, &afs_vfs_bhv);
#endif
    return 0;
}


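/*
 * Return the root vnode of the AFS filesystem.  The cached afs_globalVp is
 * reused while its status (CStatd) is still valid; otherwise the vcache for
 * afs_rootFid is fetched, held, and marked VROOT.
 */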
afs_root(OSI_VFS_ARG(afsp), avpp)
OSI_VFS_DECL(afsp);
struct vnode **avpp;
{
    afs_int32 code = 0;
    struct vrequest treq;
    struct vcache *tvp = 0;
    OSI_VFS_CONVERT(afsp);

    AFS_STATCNT(afs_root);
    if (afs_globalVp && (afs_globalVp->f.states & CStatd)) {
	tvp = afs_globalVp;
    } else {
	if (afs_globalVp) {
	    afs_PutVCache(afs_globalVp);
	    afs_globalVp = NULL;
	}

	if (!(code = afs_InitReq(&treq, OSI_GET_CURRENT_CRED()))
	    && !(code = afs_CheckInit())) {
	    tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
	    /* we really want this to stay around */
	    if (tvp) {
		afs_globalVp = tvp;
	    } else
		code = EIO;
	}
    }
    if (tvp) {
	int s;
	VN_HOLD(AFSTOV(tvp));
	s = VN_LOCK(AFSTOV(tvp));
	AFSTOV(tvp)->v_flag |= VROOT;
	VN_UNLOCK(AFSTOV(tvp), s);

	afs_globalVFS = afsp;
	*avpp = AFSTOV(tvp);
    }

    afs_Trace2(afs_iclSetp, CM_TRACE_VFSROOT, ICL_TYPE_POINTER, *avpp,
	       ICL_TYPE_INT32, code);
    return code;
}

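/*
 * statfs: AFS has no single backing partition, so report the vfs block
 * size and a large constant (AFS_VFS_FAKEFREE) for the block and inode
 * counts so that free-space checks by applications succeed.
 */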
afs_statfs(OSI_VFS_ARG(afsp), abp, avp)
OSI_VFS_DECL(afsp);
struct statvfs *abp;
struct vnode *avp;		/* unused */
{
    OSI_VFS_CONVERT(afsp);

    AFS_STATCNT(afs_statfs);
    abp->f_bsize = afsp->vfs_bsize;
    abp->f_frsize = afsp->vfs_bsize;
    /* Fake a high number below to satisfy programs that use the statfs
     * call to make sure that there's enough space in the device partition
     * before storing something there.
     */
    abp->f_blocks = abp->f_bfree = abp->f_bavail = abp->f_files =
	abp->f_ffree = abp->f_favail = AFS_VFS_FAKEFREE;

    abp->f_fsid = AFS_VFSMAGIC;	/* magic */
    strcpy(abp->f_basetype, AFS_MOUNT_AFS);
    abp->f_flag = 0;
    abp->f_namemax = 256;
    return 0;
}


/*
 * sync's responsibilities include pushing back DELWRI pages
 * Things to watch out for:
 *	1) don't want to hold off new vnodes in the file system
 *		while pushing back pages
 *	2) since we can deal with un-referenced vnodes need to watch
 *		races with folks who recycle vnodes
 * Flags:
 *	SYNC_BDFLUSH - do NOT sleep waiting for an inode - also,
 *		when pushing DELWRI - only push old ones.
 *	SYNC_PDFLUSH - push v_dpages.
 *	SYNC_ATTR - sync attributes - note that ordering considerations
 *		dictate that we also flush dirty pages
 *	SYNC_WAIT - do synchronous writes - inode & delwri
 *	SYNC_NOWAIT - start delayed writes.
 *	SYNC_DELWRI - look at inodes w/ delwri pages. Other flags
 *		decide how to deal with them.
 *	SYNC_CLOSE - flush delwri and invalidate others.
 *	SYNC_FSDATA - push fs data (e.g. superblocks)
 */

extern afs_int32 vcachegen;
#define PREEMPT_MASK 0x7f
#ifdef AFS_SGI64_ENV
#define PREEMPT()
#endif

int
afs_sync(OSI_VFS_DECL(afsp),
#ifdef AFS_SGI65_ENV
	 int flags,
#else
	 short flags,
#endif
	 struct cred *cr)
{
    /* Why enable the vfs sync operation?? */
    int error, lasterr, preempt;
    struct vcache *tvc;
    struct vnode *vp;
    afs_uint32 lvcachegen;
    struct afs_q *tq;
    struct afs_q *uq;
    int s;
    OSI_VFS_CONVERT(afsp);

    error = lasterr = preempt = 0;
    /*
     * if not interested in vnodes, skip all this
     */
#ifdef AFS_SGI61_ENV
    if ((flags & (SYNC_CLOSE | SYNC_DELWRI | SYNC_PDFLUSH)) == 0)
	goto end;
#else /* AFS_SGI61_ENV */
    if ((flags & (SYNC_CLOSE | SYNC_DELWRI | SYNC_ATTR)) == 0)
	goto end;
#endif /* AFS_SGI61_ENV */
  loop:
    ObtainReadLock(&afs_xvcache);
    for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
	tvc = QTOV(tq);
	uq = QPrev(tq);
	vp = (vnode_t *) tvc;
	/*
	 * Since we push all dirty pages on last close/VOP_INACTIVE
	 * we are only concerned with vnodes with
	 * active reference counts.
	 */
	s = VN_LOCK(vp);
	if (vp->v_count == 0) {
	    VN_UNLOCK(vp, s);
	    continue;
	}
	if ((flags & SYNC_CLOSE) == 0 && !AFS_VN_DIRTY(vp)) {
	    VN_UNLOCK(vp, s);
	    continue;
	}

	/*
	 * ignore vnodes which need no flushing
	 */
	if (flags & SYNC_DELWRI) {
	    if (!AFS_VN_DIRTY(vp)) {
		VN_UNLOCK(vp, s);
		continue;
	    }
	}
#ifdef AFS_SGI61_ENV
	else if (flags & SYNC_PDFLUSH) {
	    if (!VN_GET_DPAGES(vp)) {
		VN_UNLOCK(vp, s);
		continue;
	    }
	}
#endif /* AFS_SGI61_ENV */

	vp->v_count++;
	VN_UNLOCK(vp, s);
	lvcachegen = vcachegen;
	ReleaseReadLock(&afs_xvcache);

	/*
	 * Try to lock rwlock without sleeping.  If we can't, we must
	 * sleep for rwlock.
	 */
	if (afs_rwlock_nowait(vp, 1) == 0) {
#ifdef AFS_SGI61_ENV
	    if (flags & (SYNC_BDFLUSH | SYNC_PDFLUSH))
#else /* AFS_SGI61_ENV */
	    if (flags & SYNC_BDFLUSH)
#endif /* AFS_SGI61_ENV */
	    {
		AFS_RELE(vp);
		ObtainReadLock(&afs_xvcache);
		if (vcachegen != lvcachegen) {
		    ReleaseReadLock(&afs_xvcache);
		    goto loop;
		}
		continue;
	    }
	    AFS_RWLOCK(vp, VRWLOCK_WRITE);
	}

	AFS_GUNLOCK();
	if (flags & SYNC_CLOSE) {
	    PFLUSHINVALVP(vp, (off_t) 0, (off_t) tvc->f.m.Length);
	}
#ifdef AFS_SGI61_ENV
	else if (flags & SYNC_PDFLUSH) {
	    if (VN_GET_DPAGES(vp)) {
		pdflush(vp, B_ASYNC);
	    }
	}
#endif /* AFS_SGI61_ENV */


	if ((flags & SYNC_DELWRI) && AFS_VN_DIRTY(vp)) {
#ifdef AFS_SGI61_ENV
	    PFLUSHVP(vp, (off_t) tvc->f.m.Length,
		     (flags & SYNC_WAIT) ? 0 : B_ASYNC, error);
#else /* AFS_SGI61_ENV */
	    if (flags & SYNC_WAIT)
		/* push all and wait */
		PFLUSHVP(vp, (off_t) tvc->f.m.Length, (off_t) 0, error);
	    else if (flags & SYNC_BDFLUSH) {
		/* push oldest */
		error = pdflush(vp, B_ASYNC);
	    } else {
		/* push all but don't wait */
		PFLUSHVP(vp, (off_t) tvc->f.m.Length, (off_t) B_ASYNC, error);
	    }
#endif /* AFS_SGI61_ENV */
	}

	/*
	 * Release vp, check error and whether to preempt, and if
	 * we let go of xvcache lock and someone has changed the
	 * VLRU, restart the loop
	 */
	AFS_GLOCK();
	AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
	AFS_RELE(vp);
	if (error)
	    lasterr = error;
	if ((++preempt & PREEMPT_MASK) == 0) {
	    AFS_GUNLOCK();
	    PREEMPT();
	    AFS_GLOCK();
	}
	ObtainReadLock(&afs_xvcache);
	if (vcachegen != lvcachegen) {
	    ReleaseReadLock(&afs_xvcache);
	    goto loop;
	}
    }
    ReleaseReadLock(&afs_xvcache);
  end:
    return lasterr;
}


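/*
 * vget: translate a file identifier back into a held vnode.  Under
 * AFS_SGI64_ENV with CKPT, a checkpoint-restart fid (afs_fid2_t carrying
 * cell index, volume, vnode and uniquifier) is recognized and resolved
 * directly; otherwise the generic afs_osi_vget() path is used.
 */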
afs_vget(OSI_VFS_DECL(afsp), vnode_t ** avcp, struct fid * fidp)
{
    struct VenusFid vfid;
    struct vrequest treq;
    struct cell *tcell;
    afs_int32 code = 0;
    afs_int32 ret;

#if defined(AFS_SGI64_ENV) && defined(CKPT) && !defined(_R5000_CVT_WAR)
    afs_fid2_t *afid2;
#endif

    OSI_VFS_CONVERT(afsp);

    AFS_STATCNT(afs_vget);

    *avcp = NULL;

#if defined(AFS_SGI64_ENV) && defined(CKPT) && !defined(_R5000_CVT_WAR)
    afid2 = (afs_fid2_t *) fidp;
    if (afid2->af_len == sizeof(afs_fid2_t) - sizeof(afid2->af_len)) {
	/* It's a checkpoint restart fid. */
	tcell = afs_GetCellByIndex(afid2->af_cell, READ_LOCK);
	if (!tcell) {
	    code = EIO;
	    goto out;
	}
	vfid.Cell = tcell->cellNum;
	afs_PutCell(tcell, READ_LOCK);
	vfid.Fid.Volume = afid2->af_volid;
	vfid.Fid.Vnode = afid2->af_vno;
	vfid.Fid.Unique = afid2->af_uniq;

	if ((code = afs_InitReq(&treq, OSI_GET_CURRENT_CRED())))
	    goto out;
	*avcp =
	    (vnode_t *) afs_GetVCache(&vfid, &treq, NULL, (struct vcache *)0);
	if (!*avcp) {
	    code = EIO;
	}
	goto out;
    }
#endif

    if ((code = afs_InitReq(&treq, OSI_GET_CURRENT_CRED())))
	goto out;
    code = afs_osi_vget((struct vcache **)avcp, fidp, &treq);

  out:
    afs_Trace3(afs_iclSetp, CM_TRACE_VGET, ICL_TYPE_POINTER, *avcp,
	       ICL_TYPE_INT32, treq.uid, ICL_TYPE_FID, &vfid);
    code = afs_CheckCode(code, &treq, 42);
    return code;
}


#ifdef MP			/* locked versions of vfs operations. */

/* wrappers for vfs calls */
#ifdef AFS_SGI64_ENV
#define AFS_MP_VFS_ARG(A)	bhv_desc_t A
#else
#define AFS_MP_VFS_ARG(A)	struct vfs A
#endif
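/*
 * Each mp_afs_* wrapper below simply takes the global AFS lock around the
 * corresponding entry in afs_lockedvfsops, so the MP build exports locked
 * vfs operations through Afs_vfsops.
 */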

int
mp_afs_mount(struct vfs *a, struct vnode *b, struct mounta *c,
#ifdef AFS_SGI65_ENV
	     char *d,
#endif
	     struct cred *e)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvfsops.vfs_mount(a, b, c,
#ifdef AFS_SGI65_ENV
				    d,
#endif
				    e);
    AFS_GUNLOCK();
    return rv;
}

int
mp_afs_unmount(AFS_MP_VFS_ARG(*a), int b, struct cred *c)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvfsops.vfs_unmount(a, b, c);
    AFS_GUNLOCK();
    return rv;
}

int
mp_afs_root(AFS_MP_VFS_ARG(*a), struct vnode **b)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvfsops.vfs_root(a, b);
    AFS_GUNLOCK();
    return rv;
}

int
mp_afs_statvfs(AFS_MP_VFS_ARG(*a), struct statvfs *b, struct vnode *c)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvfsops.vfs_statvfs(a, b, c);
    AFS_GUNLOCK();
    return rv;
}

int
mp_afs_sync(AFS_MP_VFS_ARG(*a),
#ifdef AFS_SGI65_ENV
	    int b,
#else
	    short b,
#endif
	    struct cred *c)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvfsops.vfs_sync(a, b, c);
    AFS_GUNLOCK();
    return rv;
}

int
mp_afs_vget(AFS_MP_VFS_ARG(*a), struct vnode **b, struct fid *c)
{
    int rv;
    AFS_GLOCK();
    rv = afs_lockedvfsops.vfs_vget(a, b, c);
    AFS_GUNLOCK();
    return rv;
}

struct vfsops Afs_vfsops = {
#ifdef AFS_SGI64_ENV
#ifdef AFS_SGI65_ENV
    BHV_IDENTITY_INIT_POSITION(VFS_POSITION_BASE),
#else
    VFS_POSITION_BASE,
#endif
#endif
    mp_afs_mount,
#ifdef AFS_SGI64_ENV
    fs_nosys,			/* rootinit */
    fs_nosys,			/* mntupdate */
    fs_dounmount,
#endif
    mp_afs_unmount,
    mp_afs_root,
    mp_afs_statvfs,
    mp_afs_sync,
    mp_afs_vget,
    fs_nosys,			/* mountroot */
#ifdef AFS_SGI65_ENV
    fs_nosys,			/* realvfsops */
    fs_import,			/* import */
    fs_nosys,			/* quotactl */
#else
    fs_nosys,			/* swapvp */
#endif
};

#endif /* MP */