/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
#include <afsconfig.h>
#include "afs/param.h"

#include <sys/sleep.h>

#include "afs/sysincludes.h"	/* Standard vendor system headers */
#include "afsincludes.h"	/* Afs-based standard headers */
#include "afs/afs_stats.h"	/* statistics gathering code */
#include "afs/afs_cbqueue.h"

#include <sys/adspace.h>	/* for vm_att(), vm_det() */

#if defined(AFS_CACHE_BYPASS)
#include "afs/afs_bypasscache.h"
#endif /* AFS_CACHE_BYPASS */
/* background request queue size */
afs_lock_t afs_xbrs;		/* lock for brs */
static int brsInit = 0;
short afs_brsWaiters = 0;	/* number of users waiting for brs buffers */
short afs_brsDaemons = 0;	/* number of daemons waiting for brs requests */
struct brequest afs_brs[NBRS];	/* request structures */
struct afs_osi_WaitHandle AFS_WaitHandler, AFS_CSWaitHandler;
static int afs_brs_count = 0;	/* request counter, to service reqs in order */
/* PAG garbage collection */
/* We induce a compile error if param.h does not define AFS_GCPAGS */
afs_int32 afs_gcpags = AFS_GCPAGS;
afs_int32 afs_gcpags_procsize = 0;

afs_int32 afs_CheckServerDaemonStarted = 0;
#ifndef DEFAULT_PROBE_INTERVAL
#define DEFAULT_PROBE_INTERVAL 30	/* default to 30 seconds */
#endif
afs_int32 afs_probe_interval = DEFAULT_PROBE_INTERVAL;
afs_int32 afs_probe_all_interval = 600;
afs_int32 afs_nat_probe_interval = 60;
afs_int32 afs_preCache = 0;
#define PROBE_WAIT() (1000 * (afs_probe_interval - ((afs_random() & 0x7fffffff) \
		      % (afs_probe_interval/2))))
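/*
 * Worked example: with the default afs_probe_interval of 30, the random
 * term is in [0, 14] (since 30/2 == 15), so PROBE_WAIT() yields a wait of
 * 16000..30000 ms.  The jitter keeps freshly booted clients from probing
 * their servers in lockstep.
 */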
void
afs_SetCheckServerNATmode(int isnat)
{
    static afs_int32 old_intvl, old_all_intvl;
    static int wasnat = 0;
    if (isnat && !wasnat) {
        old_intvl = afs_probe_interval;
        old_all_intvl = afs_probe_all_interval;
        afs_probe_interval = afs_nat_probe_interval;
        afs_probe_all_interval = afs_nat_probe_interval;
        afs_osi_CancelWait(&AFS_CSWaitHandler);
    } else if (!isnat && wasnat) {
        afs_probe_interval = old_intvl;
        afs_probe_all_interval = old_all_intvl;
    }
    wasnat = isnat;
}
void
afs_CheckServerDaemon(void)
{
    afs_int32 now, delay, lastCheck, last10MinCheck;
    afs_CheckServerDaemonStarted = 1;

    while (afs_initState < 101)
        afs_osi_Sleep(&afs_initState);
    afs_osi_Wait(PROBE_WAIT(), &AFS_CSWaitHandler, 0);

    last10MinCheck = lastCheck = osi_Time();
    while (1) {
        if (afs_termState == AFSOP_STOP_CS) {
            afs_termState = AFSOP_STOP_TRUNCDAEMON;
            afs_osi_Wakeup(&afs_termState);
            break;
        }
        now = osi_Time();
        if (afs_probe_interval + lastCheck <= now) {
            afs_CheckServers(1, NULL);	/* check down servers */
            lastCheck = now = osi_Time();
        }
        if (afs_probe_all_interval + last10MinCheck <= now) {
            afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32,
                       afs_probe_all_interval);
            afs_CheckServers(0, NULL);
            last10MinCheck = now = osi_Time();
        }
        /* shutdown check. */
        if (afs_termState == AFSOP_STOP_CS) {
            afs_termState = AFSOP_STOP_TRUNCDAEMON;
            afs_osi_Wakeup(&afs_termState);
            break;
        }
        /* Compute time to next probe. */
        delay = afs_probe_interval + lastCheck;
        if (delay > afs_probe_all_interval + last10MinCheck)
            delay = afs_probe_all_interval + last10MinCheck;
        delay -= now;
        if (delay < 1)
            delay = 1;
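        /*
         * delay now holds the earlier of the two next deadlines
         * (down-server probe vs. all-server probe); the conversion above
         * turns that absolute time into a relative wait, clamped to at
         * least one second, for afs_osi_Wait() below.
         */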
        afs_osi_Wait(delay * 1000, &AFS_CSWaitHandler, 0);
    }
    afs_CheckServerDaemonStarted = 0;
}
extern int vfs_context_ref;
/* This function always holds the GLOCK whilst it is running. The caller
 * gets the GLOCK before invoking it, and afs_osi_Sleep drops the GLOCK
 * whilst we are sleeping, and regains it when we're woken up.
 */
void
afs_Daemon(void)
{
    afs_int32 code, now;
    struct afs_exporter *exporter;
    afs_int32 last3MinCheck, last10MinCheck, last60MinCheck, lastNMinCheck;
    afs_int32 last1MinCheck, last5MinCheck;
    afs_uint32 lastCBSlotBump;
    AFS_STATCNT(afs_Daemon);

    afs_rootFid.Fid.Volume = 0;
    while (afs_initState < 101)
        afs_osi_Sleep(&afs_initState);
#ifdef AFS_DARWIN80_ENV
    if (afs_osi_ctxtp_initialized)
        osi_Panic("vfs context already initialized");
    while (afs_osi_ctxtp && vfs_context_ref)
        afs_osi_Sleep(&afs_osi_ctxtp);
    if (afs_osi_ctxtp && !vfs_context_ref)
        vfs_context_rele(afs_osi_ctxtp);
    afs_osi_ctxtp = vfs_context_create(NULL);
    afs_osi_ctxtp_initialized = 1;
#endif
    now = osi_Time();
    lastCBSlotBump = now;
    /* when a lot of clients are booted simultaneously, they develop
     * annoying synchronous VL server bashing behaviors.  So we stagger them.
     */
    last1MinCheck = now + ((afs_random() & 0x7fffffff) % 60);	/* an extra 30 */
    last3MinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);
    last60MinCheck = now - 1800 + ((afs_random() & 0x7fffffff) % 3600);
    last10MinCheck = now - 300 + ((afs_random() & 0x7fffffff) % 600);
    last5MinCheck = now - 150 + ((afs_random() & 0x7fffffff) % 300);
    lastNMinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);
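    /*
     * Example of the stagger: last3MinCheck is set to a random value in
     * [now - 90, now + 89], so the first 3-minute check fires anywhere
     * from 90 to 269 seconds after startup; each client lands in a
     * different spot in that window.
     */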
    /* start off with afs_initState >= 101 (basic init done) */
    while (1) {
        afs_CheckCallbacks(20);	/* unstat anything which will expire soon */

        /* things to do every 20 seconds or less - required by protocol spec */
        if (afs_nfsexporter)
            afs_FlushActiveVcaches(0);	/* flush NFS writes */
        afs_FlushVCBs(1);	/* flush queued callbacks */

        afs_MaybeWakeupTruncateDaemon();	/* free cache space if have too */
        rx_CheckPackets();	/* Does RX need more packets? */

        now = osi_Time();
        if (lastCBSlotBump + CBHTSLOTLEN < now) {	/* pretty time-dependant */
            lastCBSlotBump = now;
            if (afs_BumpBase()) {
                afs_CheckCallbacks(20);	/* unstat anything which will expire soon */
            }
        }
        if (last1MinCheck + 60 < now) {
            /* things to do every minute */
            DFlush();		/* write out dir buffers */
            (void)afs_WriteThroughDSlots();	/* write through cacheinfo entries */
            ObtainWriteLock(&afs_xvcache, 736);
            afs_FlushReclaimedVcaches();
            ReleaseWriteLock(&afs_xvcache);
            afs_FlushActiveVcaches(1);	/* keep flocks held & flush nfs writes */
            afs_StoreDirtyVcaches();
            last1MinCheck = now;
        }
        if (last3MinCheck + 180 < now) {
            afs_CheckTokenCache();	/* check for access cache resets due to
					 * expired tickets */
            last3MinCheck = now;
        }
        if (afsd_dynamic_vcaches && (last5MinCheck + 300 < now)) {
            /* start with trying to drop us back to our base usage */
            int anumber = VCACHE_FREE + (afs_vcount - afs_cacheStats);

            if (anumber > 0) {
                ObtainWriteLock(&afs_xvcache, 734);
                afs_ShakeLooseVCaches(anumber);
                ReleaseWriteLock(&afs_xvcache);
            }
            last5MinCheck = now;
        }
        if (!afs_CheckServerDaemonStarted) {
            if (lastNMinCheck + afs_probe_interval < now) {
                /* only check down servers */
                afs_CheckServers(1, NULL);
                lastNMinCheck = now;
            }
        }
        if (last10MinCheck + 600 < now) {
#ifdef AFS_USERSPACE_IP_ADDR
            extern int rxi_GetcbiInfo(void);
#endif
            afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32, 600);
#ifdef AFS_USERSPACE_IP_ADDR
            if (rxi_GetcbiInfo()) {	/* addresses changed from last time */
                afs_FlushCBs();
            }
#else /* AFS_USERSPACE_IP_ADDR */
            if (rxi_GetIFInfo()) {	/* addresses changed from last time */
                afs_FlushCBs();
            }
#endif /* else AFS_USERSPACE_IP_ADDR */
            if (!afs_CheckServerDaemonStarted)
                afs_CheckServers(0, NULL);
            afs_GCUserData();	/* gc old conns */
            /* This is probably the wrong way of doing GC for the various
             * exporters but it will suffice for a while */
            for (exporter = root_exported; exporter;
                 exporter = exporter->exp_next) {
                (void)EXP_GC(exporter, 0);	/* Generalize params */
            }
            {
                static int cnt = 0;
                if (++cnt < 12) {
                    afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED |
                                         AFS_VOLCHECK_BUSY);
                } else {
                    cnt = 0;
                    afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED |
                                         AFS_VOLCHECK_BUSY |
                                         AFS_VOLCHECK_MTPTS);
                }
            }
            last10MinCheck = now;
        }
        if (last60MinCheck + 3600 < now) {
            afs_Trace1(afs_iclSetp, CM_TRACE_PROBEVOLUME, ICL_TYPE_INT32,
                       3600);
            afs_CheckRootVolume();
#if AFS_GCPAGS
            if (afs_gcpags == AFS_GCPAGS_OK) {
                afs_int32 didany;
                afs_GCPAGs(&didany);
            }
#endif
            last60MinCheck = now;
        }
        if (afs_initState < 300) {	/* while things ain't rosy */
            code = afs_CheckRootVolume();
            if (code == 0)
                afs_initState = 300;	/* succeeded */
            if (afs_initState < 200)
                afs_initState = 200;	/* tried once */
            afs_osi_Wakeup(&afs_initState);
        }
        /* 18285 is because we're trying to divide evenly into 128, that is,
         * CBSlotLen, while staying just under 20 seconds.  If CBSlotLen
         * changes, should probably change this interval, too.
         * Some of the preceding actions may take quite some time, so we
         * might not want to wait the entire interval */
        now = 18285 - (osi_Time() - now);
        if (now > 0) {
            afs_osi_Wait(now, &AFS_WaitHandler, 0);
        }
        if (afs_termState == AFSOP_STOP_AFS) {
            if (afs_CheckServerDaemonStarted)
                afs_termState = AFSOP_STOP_CS;
            else
                afs_termState = AFSOP_STOP_TRUNCDAEMON;
            afs_osi_Wakeup(&afs_termState);
            return;
        }
    }
}
int
afs_CheckRootVolume(void)
{
    char rootVolName[MAXROOTVOLNAMELEN];
    struct volume *tvp = NULL;
    int usingDynroot = afs_GetDynrootEnable();
    int localcell;

    AFS_STATCNT(afs_CheckRootVolume);
    if (*afs_rootVolumeName == 0) {
        strcpy(rootVolName, "root.afs");
    } else {
        strcpy(rootVolName, afs_rootVolumeName);
    }
    if (usingDynroot) {
        afs_GetDynrootFid(&afs_rootFid);
        tvp = afs_GetVolume(&afs_rootFid, NULL, READ_LOCK);
    } else {
        struct cell *lc = afs_GetPrimaryCell(READ_LOCK);

        if (!lc)
            return ENOENT;
        localcell = lc->cellNum;
        afs_PutCell(lc, READ_LOCK);
        tvp = afs_GetVolumeByName(rootVolName, localcell, 1, NULL, READ_LOCK);
        if (!tvp) {
            char buf[128];
            int len = strlen(rootVolName);

            if ((len < 9) || strcmp(&rootVolName[len - 9], ".readonly")) {
                strcpy(buf, rootVolName);
                afs_strcat(buf, ".readonly");
                tvp = afs_GetVolumeByName(buf, localcell, 1, NULL, READ_LOCK);
            }
        }
        if (tvp) {
            int volid = (tvp->roVol ? tvp->roVol : tvp->volume);

            afs_rootFid.Cell = localcell;
            if (afs_rootFid.Fid.Volume && afs_rootFid.Fid.Volume != volid
                && afs_globalVp) {
                /* If we had a root fid before and it changed location we reset
                 * the afs_globalVp so that it will be reevaluated.
                 * Just decrement the reference count. This only occurs during
                 * initial cell setup and can panic the machine if we set the
                 * count to zero and fs checkv is executed when the current
                 * directory is /afs.
                 */
#ifdef AFS_LINUX22_ENV
                osi_ResetRootVCache(volid);
#else
# ifdef AFS_DARWIN80_ENV
                afs_PutVCache(afs_globalVp);
# else
                AFS_FAST_RELE(afs_globalVp);
# endif
                afs_globalVp = NULL;
#endif
            }
            afs_rootFid.Fid.Volume = volid;
            afs_rootFid.Fid.Vnode = 1;
            afs_rootFid.Fid.Unique = 1;
        }
    }
    if (tvp) {
        afs_initState = 300;	/* won */
        afs_osi_Wakeup(&afs_initState);
        afs_PutVolume(tvp, READ_LOCK);
    }
    if (afs_rootFid.Fid.Volume)
        return 0;
    else
        return ENOENT;
}
/* ptr_parm 0 is the pathname, size_parm 0 to the fetch is the chunk number */
static void
BPath(struct brequest *ab)
{
    struct dcache *tdc = NULL;
    struct vcache *tvc = NULL;
    struct vnode *tvn = NULL;
#ifdef AFS_LINUX22_ENV
    struct dentry *dp = NULL;
#endif
    afs_size_t offset, len;
    struct vrequest *treq = NULL;
    afs_int32 code;

    AFS_STATCNT(BPath);
    if ((code = afs_CreateReq(&treq, ab->cred))) {
        return;
    }
    AFS_GUNLOCK();
#ifdef AFS_LINUX22_ENV
    code = gop_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYS, 1, &dp);
    if (dp)
        tvn = (struct vnode *)dp->d_inode;
#else
    code = gop_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYS, 1, &tvn);
#endif
    AFS_GLOCK();
    osi_FreeLargeSpace((char *)ab->ptr_parm[0]);	/* free path name buffer here */
    if (code) {
        afs_DestroyReq(treq);
        return;
    }
    /* now path may not have been in afs, so check that before calling our cache manager */
    if (!tvn || !IsAfsVnode(tvn)) {
        /* release it and give up */
        if (tvn) {
#ifdef AFS_LINUX22_ENV
            dput(dp);
#else
            AFS_RELE(tvn);
#endif
        }
        afs_DestroyReq(treq);
        return;
    }
    tvc = VTOAFS(tvn);

    /* here we know it's an afs vnode, so we can get the data for the chunk */
    tdc = afs_GetDCache(tvc, ab->size_parm[0], treq, &offset, &len, 1);
    if (tdc)
        afs_PutDCache(tdc);
#ifdef AFS_LINUX22_ENV
    dput(dp);
#else
    AFS_RELE(tvn);
#endif
    afs_DestroyReq(treq);
}
/* size_parm 0 to the fetch is the chunk number,
 * ptr_parm 0 is the dcache entry to wakeup,
 * size_parm 1 is true iff we should release the dcache entry here.
 */
static void
BPrefetch(struct brequest *ab)
{
    struct dcache *tdc;
    struct vcache *tvc;
    afs_size_t offset, len, abyte, totallen = 0;
    struct vrequest *treq = NULL;
    int code;

    AFS_STATCNT(BPrefetch);
    if ((code = afs_CreateReq(&treq, ab->cred)))
        return;
    abyte = ab->size_parm[0];
    tvc = ab->vc;
    do {
        tdc = afs_GetDCache(tvc, abyte, treq, &offset, &len, 1);
        if (tdc) {
            afs_PutDCache(tdc);
        }
        abyte += len;
        totallen += len;
    } while ((totallen < afs_preCache) && tdc && (len > 0));
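    /*
     * afs_preCache is the number of bytes to read ahead (it appears to be
     * configured from afsd; it defaults to 0 above).  When it is zero the
     * loop body runs exactly once, fetching only the requested chunk.
     */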
    /* now, dude may be waiting for us to clear DFFetchReq bit; do so.  Can't
     * use tdc from GetDCache since afs_GetDCache may fail, but someone may
     * be waiting for our wakeup anyway.
     */
    tdc = (struct dcache *)(ab->ptr_parm[0]);
    ObtainSharedLock(&tdc->lock, 640);
    if (tdc->mflags & DFFetchReq) {
        UpgradeSToWLock(&tdc->lock, 641);
        tdc->mflags &= ~DFFetchReq;
        ReleaseWriteLock(&tdc->lock);
    } else {
        ReleaseSharedLock(&tdc->lock);
    }
    afs_osi_Wakeup(&tdc->validPos);
    if (ab->size_parm[1]) {
        afs_PutDCache(tdc);	/* put this one back, too */
    }
    afs_DestroyReq(treq);
}
#if defined(AFS_CACHE_BYPASS)
static void
BPrefetchNoCache(struct brequest *ab)
{
    struct vrequest *treq = NULL;
    int code;

    if ((code = afs_CreateReq(&treq, ab->cred)))
        return;

    /* OS-specific prefetch routine */
    afs_PrefetchNoCache(ab->vc, ab->cred,
                        (struct nocache_read_request *) ab->ptr_parm[0]);

    afs_DestroyReq(treq);
}
#endif /* AFS_CACHE_BYPASS */
static void
BStore(struct brequest *ab)
{
    struct vcache *tvc;
    afs_int32 code;
    struct vrequest *treq = NULL;
#if defined(AFS_SGI_ENV)
    struct cred *tmpcred;
#endif

    AFS_STATCNT(BStore);
    if ((code = afs_CreateReq(&treq, ab->cred)))
        return;
    tvc = ab->vc;
#if defined(AFS_SGI_ENV)
    /*
     * Since StoreOnLastReference can end up calling osi_SyncVM which
     * calls into VM code that assumes that u.u_cred has the
     * correct credentials, we set ours to theirs for this xaction
     */
    tmpcred = OSI_GET_CURRENT_CRED();
    OSI_SET_CURRENT_CRED(ab->cred);

    /*
     * To avoid recursion since the WriteLock may be released during VM
     * operations, we hold the VOP_RWLOCK across this transaction as
     * do the other callers of StoreOnLastReference
     */
    AFS_RWLOCK((vnode_t *) tvc, 1);
#endif
    ObtainWriteLock(&tvc->lock, 209);
    code = afs_StoreOnLastReference(tvc, treq);
    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SGI_ENV)
    OSI_SET_CURRENT_CRED(tmpcred);
    AFS_RWUNLOCK((vnode_t *) tvc, 1);
#endif
    /* now set final return code, and wakeup anyone waiting */
    if ((ab->flags & BUVALID) == 0) {

        /* To explain code_raw/code_checkcode:
         * Anyone that's waiting won't have our treq, so they won't be able to
         * call afs_CheckCode themselves on the return code we provide here.
         * But if we give back only the afs_CheckCode value, they won't know
         * what the "raw" value was. So give back both values, so the waiter
         * can know the "raw" value for interpreting the value internally, as
         * well as the afs_CheckCode value to give to the OS. */
        ab->code_raw = code;
        ab->code_checkcode = afs_CheckCode(code, treq, 430);

        ab->flags |= BUVALID;
        if (ab->flags & BUWAIT) {
            ab->flags &= ~BUWAIT;
            afs_osi_Wakeup(ab);
        }
    }
    afs_DestroyReq(treq);
}
static void
BPartialStore(struct brequest *ab)
{
    struct vcache *tvc;
    afs_int32 code;
    struct vrequest *treq = NULL;
    int locked, shared_locked = 0;
    if ((code = afs_CreateReq(&treq, ab->cred)))
        return;

    tvc = ab->vc;
    locked = tvc->lock.excl_locked ? 1 : 0;
    if (!locked)
        ObtainWriteLock(&tvc->lock, 1209);
    else if (!(tvc->lock.excl_locked & WRITE_LOCK)) {
        shared_locked = 1;
        ConvertSToRLock(&tvc->lock);
    }
    code = afs_StoreAllSegments(tvc, treq, AFS_ASYNC);
    if (!locked)
        ReleaseWriteLock(&tvc->lock);
    else if (shared_locked)
        ConvertSToRLock(&tvc->lock);
    /* now set final return code, and wakeup anyone waiting */
    if ((ab->flags & BUVALID) == 0) {
        /* set final code, since treq doesn't go across processes */
        ab->code_raw = code;
        ab->code_checkcode = afs_CheckCode(code, treq, 43);
        ab->flags |= BUVALID;
        if (ab->flags & BUWAIT) {
            ab->flags &= ~BUWAIT;
            afs_osi_Wakeup(ab);
        }
    }
    afs_DestroyReq(treq);
}
/* release a held request buffer */
void
afs_BRelease(struct brequest *ab)
{
    AFS_STATCNT(afs_BRelease);
    ObtainWriteLock(&afs_xbrs, 294);
    if (--ab->refCount <= 0) {
        ab->flags = 0;
    }
    if (afs_brsWaiters)
        afs_osi_Wakeup(&afs_brsWaiters);
    ReleaseWriteLock(&afs_xbrs);
}
/* return true if bkg fetch daemons are all busy */
int
afs_BBusy(void)
{
    AFS_STATCNT(afs_BBusy);
    if (afs_brsDaemons > 0)
        return 0;
    return 1;
}
struct brequest *
afs_BQueue(short aopcode, struct vcache *avc,
           afs_int32 dontwait, afs_int32 ause, afs_ucred_t *acred,
           afs_size_t asparm0, afs_size_t asparm1, void *apparm0,
           void *apparm1, void *apparm2)
{
    int i;
    struct brequest *tb;

    AFS_STATCNT(afs_BQueue);
    ObtainWriteLock(&afs_xbrs, 296);
    while (1) {
        tb = afs_brs;
        for (i = 0; i < NBRS; i++, tb++) {
            if (tb->refCount == 0)
                break;
        }
        if (i < NBRS) {
            /* found a buffer */
            tb->opcode = aopcode;
            tb->vc = avc;
            tb->cred = acred;
            if (tb->cred) {
                crhold(tb->cred);
            }
            if (avc) {
                AFS_FAST_HOLD(avc);
            }
            tb->refCount = ause + 1;
            tb->size_parm[0] = asparm0;
            tb->size_parm[1] = asparm1;
            tb->ptr_parm[0] = apparm0;
            tb->ptr_parm[1] = apparm1;
            tb->ptr_parm[2] = apparm2;
            tb->flags = 0;
            tb->code_raw = tb->code_checkcode = 0;
            tb->ts = afs_brs_count++;
            /* if daemons are waiting for work, wake them up */
            if (afs_brsDaemons > 0) {
                afs_osi_Wakeup(&afs_brsDaemons);
            }
            ReleaseWriteLock(&afs_xbrs);
            return tb;
        }
        if (dontwait) {
            ReleaseWriteLock(&afs_xbrs);
            return NULL;
        }
        /* no free buffers, sleep a while */
        afs_brsWaiters++;
        ReleaseWriteLock(&afs_xbrs);
        afs_osi_Sleep(&afs_brsWaiters);
        ObtainWriteLock(&afs_xbrs, 301);
        afs_brsWaiters--;
    }
}
#ifdef AFS_AIX41_ENV
/* AIX 4.1 has a much different sleep/wakeup mechanism available for use.
 * The modifications here will work for either a UP or MP machine.
 */
struct buf *afs_asyncbuf = (struct buf *)0;
tid_t afs_asyncbuf_cv = EVENT_NULL;
afs_int32 afs_biodcnt = 0;
/* in implementing this, I assumed that all external linked lists were
 * null-terminated.
 *
 * Several places in this code traverse a linked list.  The algorithm
 * used here is probably unfamiliar to most people.  Careful examination
 * will show that it eliminates an assignment inside the loop, as compared
 * to the standard algorithm, at the cost of occasionally using an extra
 * variable.
 */
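/*
 * An illustrative sketch of that pattern (not code from this file): keep
 * a pointer to the link field rather than to the node, so unlinking the
 * chosen node needs no separate "previous" pointer:
 *
 *	struct buf **lbpP, **bestlbpP = &afs_asyncbuf;
 *	struct buf *bp, *best;
 *	for (lbpP = &afs_asyncbuf; (bp = *lbpP) != NULL; lbpP = &bp->av_forw)
 *	    if (older(bp, *bestlbpP))        (older() is hypothetical)
 *		bestlbpP = lbpP;
 *	best = *bestlbpP;
 *	*bestlbpP = best->av_forw;           (unlink without walking back)
 */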
/*
 * This function obtains, and returns, a pointer to a buffer for
 * processing by a daemon.  It sleeps until such a buffer is available.
 * The source of buffers for it is the list afs_asyncbuf (see also
 * afs_gn_strategy).  This function may be invoked concurrently by
 * several processes, that is, several instances of the same daemon.
 * afs_gn_strategy, which adds buffers to the list, runs at interrupt
 * level, while get_bioreq runs at process level.
 *
 * Since AIX 4.1 can wake just one process at a time, the separate sleep
 * addresses have been removed.
 * Note that the kernel_lock is held until the e_sleep_thread() occurs.
 * The afs_asyncbuf_lock is primarily used to serialize access between
 * process and interrupts.
 */
Simple_lock afs_asyncbuf_lock;
static struct buf *
afs_get_bioreq(void)
{
    struct buf *bp = NULL;
    struct buf *bestbp;
    struct buf **bestlbpP, **lbpP;
    long bestage;
    struct buf *t1P, *t2P;	/* temp pointers for list manipulation */
    int oldPriority;
    int interrupted;
    struct afs_bioqueue *s;
    /* ??? Does the forward pointer of the returned buffer need to be NULL?
     */

    /* Disable interrupts from the strategy function, and save the
     * prior priority level and lock access to the afs_asyncbuf.
     */
    oldPriority = disable_lock(INTMAX, &afs_asyncbuf_lock);
    while (1) {
        if (afs_asyncbuf) {
            /* look for oldest buffer */
            bp = bestbp = afs_asyncbuf;
            bestage = (long)bestbp->av_back;
            bestlbpP = &afs_asyncbuf;
            while (1) {
                lbpP = &bp->av_forw;
                bp = *lbpP;
                if (!bp)
                    break;
                if ((long)bp->av_back - bestage < 0) {
                    bestbp = bp;
                    bestlbpP = lbpP;
                    bestage = (long)bp->av_back;
                }
            }
            bp = bestbp;
            *bestlbpP = bp->av_forw;
            break;
        }
        else {
            /* If afs_asyncbuf is null, it is necessary to go to sleep.
             * e_wakeup_one() ensures that only one thread wakes.
             */
            /* The LOCK_HANDLER indicates to e_sleep_thread to only drop the
             * lock on an MP machine.
             */
            interrupted =
                e_sleep_thread(&afs_asyncbuf_cv, &afs_asyncbuf_lock,
                               LOCK_HANDLER | INTERRUPTIBLE);
            if (interrupted == THREAD_INTERRUPTED) {
                /* re-enable interrupts from strategy */
                unlock_enable(oldPriority, &afs_asyncbuf_lock);
                return NULL;
            }
        }			/* end of "else asyncbuf is empty" */
    }				/* end of "inner loop" */
    /* Re-enable interrupts from strategy. */
    unlock_enable(oldPriority, &afs_asyncbuf_lock);
    /* For the convenience of other code, replace the gnodes in
     * the b_vp field of bp and the other buffers on the b_work
     * chain with the corresponding vnodes.
     *
     * ??? what happens to the gnodes?  They're not just cut loose,
     * are they?
     */
    for (t1P = bp;;) {
        t2P = (struct buf *)t1P->b_work;
        t1P->b_vp = ((struct gnode *)t1P->b_vp)->gn_vnode;
        if (!t2P)
            break;

        t1P = (struct buf *)t2P->b_work;
        t2P->b_vp = ((struct gnode *)t2P->b_vp)->gn_vnode;
        if (!t1P)
            break;
    }
    /* If the buffer does not specify I/O, it may immediately
     * be returned to the caller.  This condition is detected
     * by examining the buffer's flags (the b_flags field).  If
     * the B_PFPROT bit is set, the buffer represents a protection
     * violation, rather than a request for I/O.  The remainder
     * of the outer loop handles the case where the B_PFPROT bit is clear.
     */
    if (bp->b_flags & B_PFPROT) {
        return (bp);
    }
    return (bp);
}				/* end of function get_bioreq() */
/*
 * This function is the daemon.  It is called from the syscall
 * interface.  Ordinarily, a script or an administrator will run a
 * daemon startup utility, specifying the number of I/O daemons to
 * run.  The utility will fork off that number of processes,
 * each making the appropriate syscall, which will cause this
 * function to be invoked.
 */
static int afs_initbiod = 0;	/* this is self-initializing code */
int
afs_BioDaemon(afs_int32 nbiods)
{
    afs_int32 code, s, pflg = 0;
    label_t jmpbuf;
    struct buf *bp, *bp1, *tbp1, *tbp2;	/* temp pointers only */
    caddr_t tmpaddr;
    struct vcache *vcp;
    int tmperr;
    if (!afs_initbiod) {
        afs_initbiod = 1;
        /* pin lock, since we'll be using it in an interrupt. */
        lock_alloc(&afs_asyncbuf_lock, LOCK_ALLOC_PIN, 2, 1);
        simple_lock_init(&afs_asyncbuf_lock);
        pin(&afs_asyncbuf, sizeof(struct buf *));
        pin(&afs_asyncbuf_cv, sizeof(afs_int32));
    }
    /* Ignore HUP signals... */
    {
        sigset_t sigbits, osigbits;
        /*
         * add SIGHUP to the set of already masked signals
         */
        SIGFILLSET(sigbits);	/* allow all signals */
        SIGDELSET(sigbits, SIGHUP);	/* except SIGHUP */
        limit_sigs(&sigbits, &osigbits);	/* and already masked */
    }
    /* Main body starts here -- this is an intentional infinite loop, and
     * should NEVER exit.
     *
     * Now, the loop will exit if get_bioreq() returns NULL, indicating
     * that we've been interrupted.
     */
    while (1) {
        bp = afs_get_bioreq();
        if (!bp)
            break;		/* we were interrupted */
        if (code = setjmpx(&jmpbuf)) {
            /* This should not have happened, maybe a lack of resources */
            s = disable_lock(INTMAX, &afs_asyncbuf_lock);
            for (bp1 = bp; bp; bp = bp1) {
                if (bp1)
                    bp1 = (struct buf *)bp1->b_work;
                bp->b_error = code;
                bp->b_flags |= B_ERROR;
                iodone(bp);
            }
            unlock_enable(s, &afs_asyncbuf_lock);
            continue;
        }
        vcp = VTOAFS(bp->b_vp);
        if (bp->b_flags & B_PFSTORE) {	/* XXXX */
            ObtainWriteLock(&vcp->lock, 404);
            if (vcp->v.v_gnode->gn_mwrcnt) {
                afs_offs_t newlength =
                    (afs_offs_t) dbtob(bp->b_blkno) + bp->b_bcount;
                if (vcp->f.m.Length < newlength) {
                    afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
                               ICL_TYPE_STRING, __FILE__, ICL_TYPE_LONG,
                               __LINE__, ICL_TYPE_OFFSET,
                               ICL_HANDLE_OFFSET(vcp->f.m.Length),
                               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(newlength));
                    vcp->f.m.Length = newlength;
                }
            }
            ReleaseWriteLock(&vcp->lock);
        }
        /* If the buffer represents a protection violation, rather than
         * an actual request for I/O, no special action need be taken.
         */
        if (bp->b_flags & B_PFPROT) {
            iodone(bp);		/* Notify all users of the buffer that we're done */
            clrjmpx(&jmpbuf);
            continue;
        }
        ObtainWriteLock(&vcp->pvmlock, 211);
        /*
         * First map its data area to a region in the current address space
         * by calling vm_att with the subspace identifier, and a pointer to
         * the data area.  vm_att returns a new data area pointer, but we
         * also want to hang onto the old one.
         */
        tmpaddr = bp->b_baddr;
        bp->b_baddr = (caddr_t) vm_att(bp->b_xmemd.subspace_id, tmpaddr);
= afs_ustrategy(bp
); /* temp variable saves offset calculation */
921 if (tmperr
) { /* in non-error case */
922 bp
->b_flags
|= B_ERROR
; /* should other flags remain set ??? */
923 bp
->b_error
= tmperr
;
926 /* Unmap the buffer's data area by calling vm_det. Reset data area
927 * to the value that we saved above.
930 bp
->b_baddr
= tmpaddr
;
        /*
         * buffer may be linked with other buffers via the b_work field.
         * See also afs_gn_strategy.  For each buffer in the chain (including
         * bp) notify all users of the buffer that the daemon is finished
         * using it by calling iodone.
         * assumes iodone can modify the b_work field.
         */
        for (tbp1 = bp;;) {
            tbp2 = (struct buf *)tbp1->b_work;
            iodone(tbp1);
            if (!tbp2)
                break;

            tbp1 = (struct buf *)tbp2->b_work;
            iodone(tbp2);
            if (!tbp1)
                break;
        }
        ReleaseWriteLock(&vcp->pvmlock);	/* Unlock the vnode. */
        clrjmpx(&jmpbuf);
    }				/* infinite loop (unless we're interrupted) */
}				/* end of afs_BioDaemon() */

#endif /* AFS_AIX41_ENV */
static void
afs_BackgroundDaemon_once(void)
{
    LOCK_INIT(&afs_xbrs, "afs_xbrs");
    memset(afs_brs, 0, sizeof(afs_brs));
    brsInit = 1;
#if defined (AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
    /*
     * steal the first daemon for doing delayed DSlot flushing
     * (see afs_GetDownDSlot)
     */
    AFS_GUNLOCK();
    afs_sgidaemon();
    exit(CLD_EXITED, 0);
#endif
}
static void
brequest_release(struct brequest *tb)
{
    if (tb->vc) {
        AFS_RELE(AFSTOV(tb->vc));	/* MUST call vnode layer or could lose vnodes */
        tb->vc = NULL;
    }
    if (tb->cred) {
        crfree(tb->cred);
        tb->cred = (afs_ucred_t *)0;
    }
    afs_BRelease(tb);	/* this grabs and releases afs_xbrs lock */
}
#ifdef AFS_DARWIN80_ENV
int
afs_BackgroundDaemon(struct afs_uspc_param *uspc, void *param1, void *param2)
#else
void
afs_BackgroundDaemon(void)
#endif
{
    struct brequest *tb;
    int i, foundAny;

    AFS_STATCNT(afs_BackgroundDaemon);
    /* initialize subsystem */
    if (brsInit == 0)
        /* Irix with "short stack" exits */
        afs_BackgroundDaemon_once();
#ifdef AFS_DARWIN80_ENV
    /* If it's a re-entering syscall, complete the request and release */
    if (uspc->ts > -1) {
        tb = afs_brs;
        for (i = 0; i < NBRS; i++, tb++) {
            if (tb->ts == uspc->ts) {
                /* copy the userspace status back in */
                ((struct afs_uspc_param *) tb->ptr_parm[0])->retval =
                    uspc->retval;
                /* mark it valid and notify our caller */
                tb->flags |= BUVALID;
                if (tb->flags & BUWAIT) {
                    tb->flags &= ~BUWAIT;
                    afs_osi_Wakeup(tb);
                }
                brequest_release(tb);
                break;
            }
        }
    } else
        afs_osi_MaskUserLoop();
#endif
    /* Otherwise it's a new one */
    afs_nbrs++;

    ObtainWriteLock(&afs_xbrs, 302);
    while (1) {
        afs_int32 min_ts = 0;
        struct brequest *min_tb = NULL;
        if (afs_termState == AFSOP_STOP_BKG) {
            if (--afs_nbrs <= 0)
                afs_termState = AFSOP_STOP_RXCALLBACK;
            ReleaseWriteLock(&afs_xbrs);
            afs_osi_Wakeup(&afs_termState);
#ifdef AFS_DARWIN80_ENV
            return -2;
#else
            return;
#endif
        }
        /* find a request */
        tb = afs_brs;
        foundAny = 0;
        for (i = 0; i < NBRS; i++, tb++) {
            /* look for request with smallest ts */
            if ((tb->refCount > 0) && !(tb->flags & BSTARTED)) {
                /* new request, not yet picked up */
                if ((min_tb && (min_ts - tb->ts > 0)) || !min_tb) {
                    min_tb = tb;
                    min_ts = tb->ts;
                }
            }
        }
        if ((tb = min_tb)) {
            /* claim and process this request */
            tb->flags |= BSTARTED;
            ReleaseWriteLock(&afs_xbrs);
            foundAny = 1;
            afs_Trace1(afs_iclSetp, CM_TRACE_BKG1, ICL_TYPE_INT32,
                       tb->opcode);
->opcode
== BOP_FETCH
)
1075 #if defined(AFS_CACHE_BYPASS)
1076 else if (tb
->opcode
== BOP_FETCH_NOCACHE
)
1077 BPrefetchNoCache(tb
);
1079 else if (tb
->opcode
== BOP_STORE
)
1081 else if (tb
->opcode
== BOP_PATH
)
1083 #ifdef AFS_DARWIN80_ENV
1084 else if (tb
->opcode
== BOP_MOVE
) {
1085 memcpy(uspc
, (struct afs_uspc_param
*) tb
->ptr_parm
[0],
1086 sizeof(struct afs_uspc_param
));
1088 /* string lengths capped in move vop; copy NUL tho */
1089 memcpy(param1
, (char *)tb
->ptr_parm
[1],
1090 strlen(tb
->ptr_parm
[1])+1);
1091 memcpy(param2
, (char *)tb
->ptr_parm
[2],
1092 strlen(tb
->ptr_parm
[2])+1);
1096 else if (tb
->opcode
== BOP_PARTIAL_STORE
)
1099 panic("background bop");
1100 brequest_release(tb
);
1101 ObtainWriteLock(&afs_xbrs
, 305);
        if (!foundAny) {
            /* wait for new request */
            afs_brsDaemons++;
            ReleaseWriteLock(&afs_xbrs);
            afs_osi_Sleep(&afs_brsDaemons);
            ObtainWriteLock(&afs_xbrs, 307);
            afs_brsDaemons--;
        }
    }
}
void
shutdown_daemons(void)
{
    AFS_STATCNT(shutdown_daemons);
    if (afs_cold_shutdown) {
        afs_brsDaemons = brsInit = 0;
        afs_brsWaiters = 0;
        memset(afs_brs, 0, sizeof(afs_brs));
        memset(&afs_xbrs, 0, sizeof(afs_lock_t));
#ifdef AFS_AIX41_ENV
        lock_free(&afs_asyncbuf_lock);
        unpin(&afs_asyncbuf, sizeof(struct buf *));
        unpin(&afs_asyncbuf_cv, sizeof(afs_int32));
        afs_initbiod = 0;
#endif
    }
}
#if defined(AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
/*
 * sgi - daemon - handles certain operations that otherwise
 * would use up too much kernel stack space
 *
 * This all assumes that since the caller must have the xdcache lock
 * exclusively that the list will never be more than one long
 * and no one else can attempt to add anything until we're done.
 */
SV_TYPE afs_sgibksync;
SV_TYPE afs_sgibkwait;
lock_t afs_sgibklock;
struct dcache *afs_sgibklist;
int
afs_sgidaemon(void)
{
    int s;
    struct dcache *tdc;

    if (afs_sgibklock == NULL) {
        SV_INIT(&afs_sgibksync, "bksync", 0, 0);
        SV_INIT(&afs_sgibkwait, "bkwait", 0, 0);
        SPINLOCK_INIT(&afs_sgibklock, "bklock");
    }
    s = SPLOCK(afs_sgibklock);
    for (;;) {
        /* wait for something to do */
        SP_WAIT(afs_sgibklock, s, &afs_sgibksync, PINOD);
        osi_Assert(afs_sgibklist);

        /* XX will probably need to generalize to real list someday */
        s = SPLOCK(afs_sgibklock);
        while (afs_sgibklist) {
            tdc = afs_sgibklist;
            afs_sgibklist = NULL;
            SPUNLOCK(afs_sgibklock, s);
            AFS_GLOCK();
            tdc->dflags &= ~DFEntryMod;
            osi_Assert(afs_WriteDCache(tdc, 1) == 0);
            AFS_GUNLOCK();
            s = SPLOCK(afs_sgibklock);
        }

        /* done all the work - wake everyone up */
        while (SV_SIGNAL(&afs_sgibkwait));
    }
}
#endif