/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License. For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

#include <afsconfig.h>
#include "afs/param.h"


#ifdef AFS_AIX51_ENV
#define __FULL_PROTO
#include <sys/sleep.h>
#endif

#include "afs/sysincludes.h" /* Standard vendor system headers */
#include "afsincludes.h" /* Afs-based standard headers */
#include "afs/afs_stats.h" /* statistics gathering code */
#include "afs/afs_cbqueue.h"
#ifdef AFS_AIX_ENV
#include <sys/adspace.h> /* for vm_att(), vm_det() */
#endif

#if defined(AFS_CACHE_BYPASS)
#include "afs/afs_bypasscache.h"
#endif /* AFS_CACHE_BYPASS */
/* background request queue size */
afs_lock_t afs_xbrs; /* lock for brs */
static int brsInit = 0;
short afs_brsWaiters = 0; /* number of users waiting for brs buffers */
short afs_brsDaemons = 0; /* number of daemons waiting for brs requests */
struct brequest afs_brs[NBRS]; /* request structures */
struct afs_osi_WaitHandle AFS_WaitHandler, AFS_CSWaitHandler;
static int afs_brs_count = 0; /* request counter, to service reqs in order */

/* PAG garbage collection */
/* We induce a compile error if param.h does not define AFS_GCPAGS */
afs_int32 afs_gcpags = AFS_GCPAGS;
afs_int32 afs_gcpags_procsize = 0;

afs_int32 afs_CheckServerDaemonStarted = 0;
#ifndef DEFAULT_PROBE_INTERVAL
#define DEFAULT_PROBE_INTERVAL 30 /* default to 30 seconds */
#endif
afs_int32 afs_probe_interval = DEFAULT_PROBE_INTERVAL;
afs_int32 afs_probe_all_interval = 600;
afs_int32 afs_nat_probe_interval = 60;
afs_int32 afs_preCache = 0;

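/*
 * PROBE_WAIT() yields a wait time in milliseconds, randomized into
 * roughly the upper half of afs_probe_interval (between interval/2 and
 * the full interval), so that many clients started together do not
 * probe their servers in lockstep.
 */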
#define PROBE_WAIT() (1000 * (afs_probe_interval - ((afs_random() & 0x7fffffff) \
                              % (afs_probe_interval/2))))

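/*
 * Toggle NAT keepalive mode for the CheckServer daemon: while active,
 * both probe intervals are shortened to afs_nat_probe_interval and the
 * daemon's current wait is cancelled so the new interval takes effect
 * immediately; the saved intervals are restored when NAT mode is turned
 * back off.
 */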
void
afs_SetCheckServerNATmode(int isnat)
{
    static afs_int32 old_intvl, old_all_intvl;
    static int wasnat;

    if (isnat && !wasnat) {
        old_intvl = afs_probe_interval;
        old_all_intvl = afs_probe_all_interval;
        afs_probe_interval = afs_nat_probe_interval;
        afs_probe_all_interval = afs_nat_probe_interval;
        afs_osi_CancelWait(&AFS_CSWaitHandler);
    } else if (!isnat && wasnat) {
        afs_probe_interval = old_intvl;
        afs_probe_all_interval = old_all_intvl;
    }
    wasnat = isnat;
}

void
afs_CheckServerDaemon(void)
{
    afs_int32 now, delay, lastCheck, last10MinCheck;

    afs_CheckServerDaemonStarted = 1;

    while (afs_initState < 101)
        afs_osi_Sleep(&afs_initState);
    afs_osi_Wait(PROBE_WAIT(), &AFS_CSWaitHandler, 0);

    last10MinCheck = lastCheck = osi_Time();
    while (1) {
        if (afs_termState == AFSOP_STOP_CS) {
            afs_termState = AFSOP_STOP_TRUNCDAEMON;
            afs_osi_Wakeup(&afs_termState);
            break;
        }

        now = osi_Time();
        if (afs_probe_interval + lastCheck <= now) {
            afs_CheckServers(1, NULL); /* check down servers */
            lastCheck = now = osi_Time();
        }

        if (afs_probe_all_interval + last10MinCheck <= now) {
            afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32, afs_probe_all_interval);
            afs_CheckServers(0, NULL);
            last10MinCheck = now = osi_Time();
        }
        /* shutdown check. */
        if (afs_termState == AFSOP_STOP_CS) {
            afs_termState = AFSOP_STOP_TRUNCDAEMON;
            afs_osi_Wakeup(&afs_termState);
            break;
        }

        /* Compute time to next probe. */
        delay = afs_probe_interval + lastCheck;
        if (delay > afs_probe_all_interval + last10MinCheck)
            delay = afs_probe_all_interval + last10MinCheck;
        delay -= now;
        if (delay < 1)
            delay = 1;
        afs_osi_Wait(delay * 1000, &AFS_CSWaitHandler, 0);
    }
    afs_CheckServerDaemonStarted = 0;
}

extern int vfs_context_ref;

/* This function always holds the GLOCK whilst it is running. The caller
 * gets the GLOCK before invoking it, and afs_osi_Sleep drops the GLOCK
 * whilst we are sleeping, and regains it when we're woken up.
 */
void
afs_Daemon(void)
{
    afs_int32 code;
    struct afs_exporter *exporter;
    afs_int32 now;
    afs_int32 last3MinCheck, last10MinCheck, last60MinCheck, lastNMinCheck;
    afs_int32 last1MinCheck, last5MinCheck;
    afs_uint32 lastCBSlotBump;

    AFS_STATCNT(afs_Daemon);

    afs_rootFid.Fid.Volume = 0;
    while (afs_initState < 101)
        afs_osi_Sleep(&afs_initState);

#ifdef AFS_DARWIN80_ENV
    if (afs_osi_ctxtp_initialized)
        osi_Panic("vfs context already initialized");
    while (afs_osi_ctxtp && vfs_context_ref)
        afs_osi_Sleep(&afs_osi_ctxtp);
    if (afs_osi_ctxtp && !vfs_context_ref)
        vfs_context_rele(afs_osi_ctxtp);
    afs_osi_ctxtp = vfs_context_create(NULL);
    afs_osi_ctxtp_initialized = 1;
#endif
    now = osi_Time();
    lastCBSlotBump = now;

    /* when a lot of clients are booted simultaneously, they develop
     * annoying synchronous VL server bashing behaviors. So we stagger them.
     */
    last1MinCheck = now + ((afs_random() & 0x7fffffff) % 60); /* up to 60s extra; 30s on average */
    last3MinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);
    last60MinCheck = now - 1800 + ((afs_random() & 0x7fffffff) % 3600);
    last10MinCheck = now - 300 + ((afs_random() & 0x7fffffff) % 600);
    last5MinCheck = now - 150 + ((afs_random() & 0x7fffffff) % 300);
    lastNMinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);

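/*
 * Main loop: each pass performs the ~20-second housekeeping below; the
 * slower jobs fire as their timers expire -- dir/dcache flushes every
 * minute, token-expiry checks every 3 minutes, vcache trimming every 5
 * minutes (dynamic vcaches only), down-server probes every
 * afs_probe_interval when no dedicated CheckServer daemon is running,
 * address and volume rechecks every 10 minutes, and root-volume plus
 * PAG GC checks every hour.
 */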
    /* start off with afs_initState >= 101 (basic init done) */
    while (1) {
        afs_CheckCallbacks(20); /* unstat anything which will expire soon */

        /* things to do every 20 seconds or less - required by protocol spec */
        if (afs_nfsexporter)
            afs_FlushActiveVcaches(0); /* flush NFS writes */
        afs_FlushVCBs(1); /* flush queued callbacks */

        afs_MaybeWakeupTruncateDaemon(); /* free cache space if we have to */
        rx_CheckPackets(); /* Does RX need more packets? */

        now = osi_Time();
        if (lastCBSlotBump + CBHTSLOTLEN < now) { /* pretty time-dependent */
            lastCBSlotBump = now;
            if (afs_BumpBase()) {
                afs_CheckCallbacks(20); /* unstat anything which will expire soon */
            }
        }

        if (last1MinCheck + 60 < now) {
            /* things to do every minute */
            DFlush(); /* write out dir buffers */
            (void)afs_WriteThroughDSlots(); /* write through cacheinfo entries */
            ObtainWriteLock(&afs_xvcache, 736);
            afs_FlushReclaimedVcaches();
            ReleaseWriteLock(&afs_xvcache);
            afs_FlushActiveVcaches(1); /* keep flocks held & flush nfs writes */
#if 0
            afs_StoreDirtyVcaches();
#endif
            last1MinCheck = now;
        }

        if (last3MinCheck + 180 < now) {
            afs_CheckTokenCache(); /* check for access cache resets due to expired
                                    * tickets */
            last3MinCheck = now;
        }

        if (afsd_dynamic_vcaches && (last5MinCheck + 300 < now)) {
            /* start with trying to drop us back to our base usage */
            int anumber = VCACHE_FREE + (afs_vcount - afs_cacheStats);

            if (anumber > 0) {
                ObtainWriteLock(&afs_xvcache, 734);
                afs_ShakeLooseVCaches(anumber);
                ReleaseWriteLock(&afs_xvcache);
            }
            last5MinCheck = now;
        }

        if (!afs_CheckServerDaemonStarted) {
            if (lastNMinCheck + afs_probe_interval < now) {
                /* only check down servers */
                afs_CheckServers(1, NULL);
                lastNMinCheck = now;
            }
        }
        if (last10MinCheck + 600 < now) {
#ifdef AFS_USERSPACE_IP_ADDR
            extern int rxi_GetcbiInfo(void);
#endif
            afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32, 600);
#ifdef AFS_USERSPACE_IP_ADDR
            if (rxi_GetcbiInfo()) { /* addresses changed from last time */
                afs_FlushCBs();
            }
#else /* AFS_USERSPACE_IP_ADDR */
            if (rxi_GetIFInfo()) { /* addresses changed from last time */
                afs_FlushCBs();
            }
#endif /* else AFS_USERSPACE_IP_ADDR */
            if (!afs_CheckServerDaemonStarted)
                afs_CheckServers(0, NULL);
            afs_GCUserData(); /* gc old conns */
            /* This is probably the wrong way of doing GC for the various exporters but it will suffice for a while */
            for (exporter = root_exported; exporter;
                 exporter = exporter->exp_next) {
                (void)EXP_GC(exporter, 0); /* Generalize params */
            }
            {
                static int cnt = 0;
                if (++cnt < 12) {
                    afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED |
                                         AFS_VOLCHECK_BUSY);
                } else {
                    cnt = 0;
                    afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED |
                                         AFS_VOLCHECK_BUSY |
                                         AFS_VOLCHECK_MTPTS);
                }
            }
            last10MinCheck = now;
        }
        if (last60MinCheck + 3600 < now) {
            afs_Trace1(afs_iclSetp, CM_TRACE_PROBEVOLUME, ICL_TYPE_INT32,
                       3600);
            afs_CheckRootVolume();
#if AFS_GCPAGS
            if (afs_gcpags == AFS_GCPAGS_OK) {
                afs_int32 didany;
                afs_GCPAGs(&didany);
            }
#endif
            last60MinCheck = now;
        }
        if (afs_initState < 300) { /* while things ain't rosy */
            code = afs_CheckRootVolume();
            if (code == 0)
                afs_initState = 300; /* succeeded */
            if (afs_initState < 200)
                afs_initState = 200; /* tried once */
            afs_osi_Wakeup(&afs_initState);
        }

        /* 18285 is because we're trying to divide evenly into 128, that is,
         * CBSlotLen, while staying just under 20 seconds. If CBSlotLen
         * changes, should probably change this interval, too.
         * Some of the preceding actions may take quite some time, so we
         * might not want to wait the entire interval */
        now = 18285 - (osi_Time() - now);
        if (now > 0) {
            afs_osi_Wait(now, &AFS_WaitHandler, 0);
        }

        if (afs_termState == AFSOP_STOP_AFS) {
            if (afs_CheckServerDaemonStarted)
                afs_termState = AFSOP_STOP_CS;
            else
                afs_termState = AFSOP_STOP_TRUNCDAEMON;
            afs_osi_Wakeup(&afs_termState);
            return;
        }
    }
}

int
afs_CheckRootVolume(void)
{
    char rootVolName[MAXROOTVOLNAMELEN];
    struct volume *tvp = NULL;
    int usingDynroot = afs_GetDynrootEnable();
    int localcell;

    AFS_STATCNT(afs_CheckRootVolume);
    if (*afs_rootVolumeName == 0) {
        strcpy(rootVolName, "root.afs");
    } else {
        strcpy(rootVolName, afs_rootVolumeName);
    }

    if (usingDynroot) {
        afs_GetDynrootFid(&afs_rootFid);
        tvp = afs_GetVolume(&afs_rootFid, NULL, READ_LOCK);
    } else {
        struct cell *lc = afs_GetPrimaryCell(READ_LOCK);

        if (!lc)
            return ENOENT;
        localcell = lc->cellNum;
        afs_PutCell(lc, READ_LOCK);
        tvp = afs_GetVolumeByName(rootVolName, localcell, 1, NULL, READ_LOCK);
        if (!tvp) {
            char buf[128];
            int len = strlen(rootVolName);

            if ((len < 9) || strcmp(&rootVolName[len - 9], ".readonly")) {
                strcpy(buf, rootVolName);
                afs_strcat(buf, ".readonly");
                tvp = afs_GetVolumeByName(buf, localcell, 1, NULL, READ_LOCK);
            }
        }
        if (tvp) {
            int volid = (tvp->roVol ? tvp->roVol : tvp->volume);
            afs_rootFid.Cell = localcell;
            if (afs_rootFid.Fid.Volume && afs_rootFid.Fid.Volume != volid
                && afs_globalVp) {
                /* If we had a root fid before and it changed location we reset
                 * the afs_globalVp so that it will be reevaluated.
                 * Just decrement the reference count. This only occurs during
                 * initial cell setup and can panic the machine if we set the
                 * count to zero and fs checkv is executed when the current
                 * directory is /afs.
                 */
#ifdef AFS_LINUX22_ENV
                osi_ResetRootVCache(volid);
#else
# ifdef AFS_DARWIN80_ENV
                afs_PutVCache(afs_globalVp);
# else
                AFS_FAST_RELE(afs_globalVp);
# endif
                afs_globalVp = 0;
#endif
            }
            afs_rootFid.Fid.Volume = volid;
            afs_rootFid.Fid.Vnode = 1;
            afs_rootFid.Fid.Unique = 1;
        }
    }
    if (tvp) {
        afs_initState = 300; /* won */
        afs_osi_Wakeup(&afs_initState);
        afs_PutVolume(tvp, READ_LOCK);
    }
    if (afs_rootFid.Fid.Volume)
        return 0;
    else
        return ENOENT;
}

/* ptr_parm 0 is the pathname, size_parm 0 to the fetch is the chunk number */
static void
BPath(struct brequest *ab)
{
    struct dcache *tdc = NULL;
    struct vcache *tvc = NULL;
    struct vnode *tvn = NULL;
#ifdef AFS_LINUX22_ENV
    struct dentry *dp = NULL;
#endif
    afs_size_t offset, len;
    struct vrequest *treq = NULL;
    afs_int32 code;

    AFS_STATCNT(BPath);
    if ((code = afs_CreateReq(&treq, ab->cred))) {
        return;
    }
    AFS_GUNLOCK();
#ifdef AFS_LINUX22_ENV
    code = gop_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYS, 1, &dp);
    if (dp)
        tvn = (struct vnode *)dp->d_inode;
#else
    code = gop_lookupname((char *)ab->ptr_parm[0], AFS_UIOSYS, 1, &tvn);
#endif
    AFS_GLOCK();
    osi_FreeLargeSpace((char *)ab->ptr_parm[0]); /* free path name buffer here */
    if (code) {
        afs_DestroyReq(treq);
        return;
    }
    /* now path may not have been in afs, so check that before calling our cache manager */
    if (!tvn || !IsAfsVnode(tvn)) {
        /* release it and give up */
        if (tvn) {
#ifdef AFS_LINUX22_ENV
            dput(dp);
#else
            AFS_RELE(tvn);
#endif
        }
        afs_DestroyReq(treq);
        return;
    }
    tvc = VTOAFS(tvn);
    /* here we know it's an afs vnode, so we can get the data for the chunk */
    tdc = afs_GetDCache(tvc, ab->size_parm[0], treq, &offset, &len, 1);
    if (tdc) {
        afs_PutDCache(tdc);
    }
#ifdef AFS_LINUX22_ENV
    dput(dp);
#else
    AFS_RELE(tvn);
#endif
    afs_DestroyReq(treq);
}

/* size_parm 0 to the fetch is the chunk number,
 * ptr_parm 0 is the dcache entry to wakeup,
 * size_parm 1 is true iff we should release the dcache entry here.
 */
static void
BPrefetch(struct brequest *ab)
{
    struct dcache *tdc;
    struct vcache *tvc;
    afs_size_t offset, len, abyte, totallen = 0;
    struct vrequest *treq = NULL;
    int code;

    AFS_STATCNT(BPrefetch);
    if ((code = afs_CreateReq(&treq, ab->cred)))
        return;
    abyte = ab->size_parm[0];
    tvc = ab->vc;
    do {
        tdc = afs_GetDCache(tvc, abyte, treq, &offset, &len, 1);
        if (tdc) {
            afs_PutDCache(tdc);
        }
        abyte += len;
        totallen += len;
    } while ((totallen < afs_preCache) && tdc && (len > 0));
    /* now, dude may be waiting for us to clear DFFetchReq bit; do so. Can't
     * use tdc from GetDCache since afs_GetDCache may fail, but someone may
     * be waiting for our wakeup anyway.
     */
    tdc = (struct dcache *)(ab->ptr_parm[0]);
    ObtainSharedLock(&tdc->lock, 640);
    if (tdc->mflags & DFFetchReq) {
        UpgradeSToWLock(&tdc->lock, 641);
        tdc->mflags &= ~DFFetchReq;
        ReleaseWriteLock(&tdc->lock);
    } else {
        ReleaseSharedLock(&tdc->lock);
    }
    afs_osi_Wakeup(&tdc->validPos);
    if (ab->size_parm[1]) {
        afs_PutDCache(tdc); /* put this one back, too */
    }
    afs_DestroyReq(treq);
}

#if defined(AFS_CACHE_BYPASS)
static void
BPrefetchNoCache(struct brequest *ab)
{
    struct vrequest *treq = NULL;
    int code;

    if ((code = afs_CreateReq(&treq, ab->cred)))
        return;

#ifndef UKERNEL
    /* OS-specific prefetch routine */
    afs_PrefetchNoCache(ab->vc, ab->cred, (struct nocache_read_request *) ab->ptr_parm[0]);
#endif
    afs_DestroyReq(treq);
}
#endif

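/*
 * BStore: write back a file's dirty data in the background after its
 * last reference has gone away. ab->vc is the vcache to flush; the
 * store runs under the queuing user's credentials (ab->cred).
 */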
static void
BStore(struct brequest *ab)
{
    struct vcache *tvc;
    afs_int32 code;
    struct vrequest *treq = NULL;
#if defined(AFS_SGI_ENV)
    struct cred *tmpcred;
#endif

    AFS_STATCNT(BStore);
    if ((code = afs_CreateReq(&treq, ab->cred)))
        return;
    tvc = ab->vc;
#if defined(AFS_SGI_ENV)
    /*
     * Since StoreOnLastReference can end up calling osi_SyncVM which
     * calls into VM code that assumes that u.u_cred has the
     * correct credentials, we set ours to theirs for this transaction
     */
    tmpcred = OSI_GET_CURRENT_CRED();
    OSI_SET_CURRENT_CRED(ab->cred);

    /*
     * To avoid recursion since the WriteLock may be released during VM
     * operations, we hold the VOP_RWLOCK across this transaction as
     * do the other callers of StoreOnLastReference
     */
    AFS_RWLOCK((vnode_t *) tvc, 1);
#endif
    ObtainWriteLock(&tvc->lock, 209);
    code = afs_StoreOnLastReference(tvc, treq);
    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SGI_ENV)
    OSI_SET_CURRENT_CRED(tmpcred);
    AFS_RWUNLOCK((vnode_t *) tvc, 1);
#endif
    /* now set final return code, and wakeup anyone waiting */
    if ((ab->flags & BUVALID) == 0) {

        /* To explain code_raw/code_checkcode:
         * Anyone that's waiting won't have our treq, so they won't be able to
         * call afs_CheckCode themselves on the return code we provide here.
         * But if we give back only the afs_CheckCode value, they won't know
         * what the "raw" value was. So give back both values, so the waiter
         * can know the "raw" value for interpreting the value internally, as
         * well as the afs_CheckCode value to give to the OS. */
        ab->code_raw = code;
        ab->code_checkcode = afs_CheckCode(code, treq, 430);

        ab->flags |= BUVALID;
        if (ab->flags & BUWAIT) {
            ab->flags &= ~BUWAIT;
            afs_osi_Wakeup(ab);
        }
    }
    afs_DestroyReq(treq);
}

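/*
 * BPartialStore: asynchronously store a file's dirty chunks without
 * waiting for the last reference, taking care not to re-acquire the
 * vcache lock if the queuing thread already holds it.
 */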
static void
BPartialStore(struct brequest *ab)
{
    struct vcache *tvc;
    afs_int32 code;
    struct vrequest *treq = NULL;
    int locked, shared_locked = 0;

    AFS_STATCNT(BStore);
    if ((code = afs_CreateReq(&treq, ab->cred)))
        return;
    tvc = ab->vc;
    locked = tvc->lock.excl_locked ? 1 : 0;
    if (!locked)
        ObtainWriteLock(&tvc->lock, 1209);
    else if (!(tvc->lock.excl_locked & WRITE_LOCK)) {
        shared_locked = 1;
        ConvertSToRLock(&tvc->lock);
    }
    code = afs_StoreAllSegments(tvc, treq, AFS_ASYNC);
    if (!locked)
        ReleaseWriteLock(&tvc->lock);
    else if (shared_locked)
        ConvertSToRLock(&tvc->lock);
    /* now set final return code, and wakeup anyone waiting */
    if ((ab->flags & BUVALID) == 0) {
        /* set final code, since treq doesn't go across processes */
        ab->code_raw = code;
        ab->code_checkcode = afs_CheckCode(code, treq, 43);
        ab->flags |= BUVALID;
        if (ab->flags & BUWAIT) {
            ab->flags &= ~BUWAIT;
            afs_osi_Wakeup(ab);
        }
    }
    afs_DestroyReq(treq);
}

/* release a held request buffer */
void
afs_BRelease(struct brequest *ab)
{

    AFS_STATCNT(afs_BRelease);
    ObtainWriteLock(&afs_xbrs, 294);
    if (--ab->refCount <= 0) {
        ab->flags = 0;
    }
    if (afs_brsWaiters)
        afs_osi_Wakeup(&afs_brsWaiters);
    ReleaseWriteLock(&afs_xbrs);
}

/* return true if bkg fetch daemons are all busy */
int
afs_BBusy(void)
{
    AFS_STATCNT(afs_BBusy);
    if (afs_brsDaemons > 0)
        return 0;
    return 1;
}

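/*
 * afs_BQueue: queue a background request for the daemons. aopcode selects
 * the handler (BOP_FETCH, BOP_STORE, BOP_PATH, ...); avc and acred are
 * held/referenced for the life of the request; ause asks for that many
 * extra references on the request so the caller can wait on it and read
 * back the completion code; the size_parm/ptr_parm slots carry
 * opcode-specific arguments. If no request slot is free, we return NULL
 * when dontwait is set, otherwise we sleep until one is released.
 *
 * An illustrative call, modelled on the prefetch path -- the exact
 * arguments used by callers elsewhere in the tree may differ:
 *
 *     (void)afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
 *                      (afs_size_t) offset, (afs_size_t) 1, tdc,
 *                      (void *)0, (void *)0);
 */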
struct brequest *
afs_BQueue(short aopcode, struct vcache *avc,
           afs_int32 dontwait, afs_int32 ause, afs_ucred_t *acred,
           afs_size_t asparm0, afs_size_t asparm1, void *apparm0,
           void *apparm1, void *apparm2)
{
    int i;
    struct brequest *tb;

    AFS_STATCNT(afs_BQueue);
    ObtainWriteLock(&afs_xbrs, 296);
    while (1) {
        tb = afs_brs;
        for (i = 0; i < NBRS; i++, tb++) {
            if (tb->refCount == 0)
                break;
        }
        if (i < NBRS) {
            /* found a buffer */
            tb->opcode = aopcode;
            tb->vc = avc;
            tb->cred = acred;
            if (tb->cred) {
                crhold(tb->cred);
            }
            if (avc) {
                AFS_FAST_HOLD(avc);
            }
            tb->refCount = ause + 1;
            tb->size_parm[0] = asparm0;
            tb->size_parm[1] = asparm1;
            tb->ptr_parm[0] = apparm0;
            tb->ptr_parm[1] = apparm1;
            tb->ptr_parm[2] = apparm2;
            tb->flags = 0;
            tb->code_raw = tb->code_checkcode = 0;
            tb->ts = afs_brs_count++;
            /* if daemons are waiting for work, wake them up */
            if (afs_brsDaemons > 0) {
                afs_osi_Wakeup(&afs_brsDaemons);
            }
            ReleaseWriteLock(&afs_xbrs);
            return tb;
        }
        if (dontwait) {
            ReleaseWriteLock(&afs_xbrs);
            return NULL;
        }
        /* no free buffers, sleep a while */
        afs_brsWaiters++;
        ReleaseWriteLock(&afs_xbrs);
        afs_osi_Sleep(&afs_brsWaiters);
        ObtainWriteLock(&afs_xbrs, 301);
        afs_brsWaiters--;
    }
}

#ifdef AFS_AIX41_ENV
/* AIX 4.1 has a much different sleep/wakeup mechanism available for use.
 * The modifications here will work for either a UP or MP machine.
 */
struct buf *afs_asyncbuf = (struct buf *)0;
tid_t afs_asyncbuf_cv = EVENT_NULL;
afs_int32 afs_biodcnt = 0;

/* in implementing this, I assumed that all external linked lists were
 * null-terminated.
 *
 * Several places in this code traverse a linked list. The algorithm
 * used here is probably unfamiliar to most people. Careful examination
 * will show that it eliminates an assignment inside the loop, as compared
 * to the standard algorithm, at the cost of occasionally using an extra
 * variable.
 */

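/*
 * Illustrative sketch (not compiled here) of the two-pointer traversal
 * described above and used in the gnode-replacement and iodone loops
 * below: each pass through the loop body advances two nodes, so the
 * "p = p->next" assignment of the textbook loop disappears:
 *
 *     for (a = head;;) {
 *         b = a->next; visit(a); if (!b) break;
 *         a = b->next; visit(b); if (!a) break;
 *     }
 */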
/* get_bioreq()
 *
 * This function obtains, and returns, a pointer to a buffer for
 * processing by a daemon. It sleeps until such a buffer is available.
 * The source of buffers for it is the list afs_asyncbuf (see also
 * afs_gn_strategy). This function may be invoked concurrently by
 * several processes, that is, several instances of the same daemon.
 * afs_gn_strategy, which adds buffers to the list, runs at interrupt
 * level, while get_bioreq runs at process level.
 *
 * Since AIX 4.1 can wake just one process at a time, the separate sleep
 * addresses have been removed.
 * Note that the kernel_lock is held until the e_sleep_thread() occurs.
 * The afs_asyncbuf_lock is primarily used to serialize access between
 * process and interrupts.
 */
Simple_lock afs_asyncbuf_lock;
struct buf *
afs_get_bioreq()
{
    struct buf *bp = NULL;
    struct buf *bestbp;
    struct buf **bestlbpP, **lbpP;
    long bestage, stop;
    struct buf *t1P, *t2P; /* temp pointers for list manipulation */
    int oldPriority;
    afs_uint32 wait_ret;
    struct afs_bioqueue *s;

    /* ??? Does the forward pointer of the returned buffer need to be NULL?
     */

    /* Disable interrupts from the strategy function, and save the
     * prior priority level and lock access to the afs_asyncbuf.
     */
    AFS_GUNLOCK();
    oldPriority = disable_lock(INTMAX, &afs_asyncbuf_lock);

    while (1) {
        if (afs_asyncbuf) {
            /* look for oldest buffer */
            bp = bestbp = afs_asyncbuf;
            bestage = (long)bestbp->av_back;
            bestlbpP = &afs_asyncbuf;
            while (1) {
                lbpP = &bp->av_forw;
                bp = *lbpP;
                if (!bp)
                    break;
                if ((long)bp->av_back - bestage < 0) {
                    bestbp = bp;
                    bestlbpP = lbpP;
                    bestage = (long)bp->av_back;
                }
            }
            bp = bestbp;
            *bestlbpP = bp->av_forw;
            break;
        } else {
            /* If afs_asyncbuf is null, it is necessary to go to sleep.
             * e_wakeup_one() ensures that only one thread wakes.
             */
            int interrupted;
            /* The LOCK_HANDLER indicates to e_sleep_thread to only drop the
             * lock on an MP machine.
             */
            interrupted =
                e_sleep_thread(&afs_asyncbuf_cv, &afs_asyncbuf_lock,
                               LOCK_HANDLER | INTERRUPTIBLE);
            if (interrupted == THREAD_INTERRUPTED) {
                /* re-enable interrupts from strategy */
                unlock_enable(oldPriority, &afs_asyncbuf_lock);
                AFS_GLOCK();
                return (NULL);
            }
        } /* end of "else asyncbuf is empty" */
    } /* end of "inner loop" */

    /*assert (bp); */

    unlock_enable(oldPriority, &afs_asyncbuf_lock);
    AFS_GLOCK();

    /* For the convenience of other code, replace the gnodes in
     * the b_vp field of bp and the other buffers on the b_work
     * chain with the corresponding vnodes.
     *
     * ??? what happens to the gnodes? They're not just cut loose,
     * are they?
     */
    for (t1P = bp;;) {
        t2P = (struct buf *)t1P->b_work;
        t1P->b_vp = ((struct gnode *)t1P->b_vp)->gn_vnode;
        if (!t2P)
            break;

        t1P = (struct buf *)t2P->b_work;
        t2P->b_vp = ((struct gnode *)t2P->b_vp)->gn_vnode;
        if (!t1P)
            break;
    }

    /* If the buffer does not specify I/O, it may immediately
     * be returned to the caller. This condition is detected
     * by examining the buffer's flags (the b_flags field). If
     * the B_PFPROT bit is set, the buffer represents a protection
     * violation, rather than a request for I/O. The remainder
     * of the outer loop handles the case where the B_PFPROT bit is clear.
     */
    if (bp->b_flags & B_PFPROT) {
        return (bp);
    }
    return (bp);

} /* end of function get_bioreq() */


/* afs_BioDaemon
 *
 * This function is the daemon. It is called from the syscall
 * interface. Ordinarily, a script or an administrator will run a
 * daemon startup utility, specifying the number of I/O daemons to
 * run. The utility will fork off that number of processes,
 * each making the appropriate syscall, which will cause this
 * function to be invoked.
 */
static int afs_initbiod = 0; /* this is self-initializing code */
int DOvmlock = 0;
int
afs_BioDaemon(afs_int32 nbiods)
{
    afs_int32 code, s, pflg = 0;
    label_t jmpbuf;
    struct buf *bp, *bp1, *tbp1, *tbp2; /* temp pointers only */
    caddr_t tmpaddr;
    struct vnode *vp;
    struct vcache *vcp;
    char tmperr;
    if (!afs_initbiod) {
        /* XXX ###1 XXX */
        afs_initbiod = 1;
        /* pin lock, since we'll be using it in an interrupt. */
        lock_alloc(&afs_asyncbuf_lock, LOCK_ALLOC_PIN, 2, 1);
        simple_lock_init(&afs_asyncbuf_lock);
        pin(&afs_asyncbuf, sizeof(struct buf *));
        pin(&afs_asyncbuf_cv, sizeof(afs_int32));
    }

    /* Ignore HUP signals... */
    {
        sigset_t sigbits, osigbits;
        /*
         * add SIGHUP to the set of already masked signals
         */
        SIGFILLSET(sigbits); /* allow all signals */
        SIGDELSET(sigbits, SIGHUP); /* except SIGHUP */
        limit_sigs(&sigbits, &osigbits); /* and already masked */
    }
    /* Main body starts here -- this is an intentional infinite loop, and
     * should NEVER exit
     *
     * Now, the loop will exit if get_bioreq() returns NULL, indicating
     * that we've been interrupted.
     */
    while (1) {
        bp = afs_get_bioreq();
        if (!bp)
            break; /* we were interrupted */
        if ((code = setjmpx(&jmpbuf))) {
            /* This should not have happened, maybe a lack of resources */
            AFS_GUNLOCK();
            s = disable_lock(INTMAX, &afs_asyncbuf_lock);
            for (bp1 = bp; bp; bp = bp1) {
                if (bp1)
                    bp1 = (struct buf *)bp1->b_work;
                bp->b_actf = 0;
                bp->b_error = code;
                bp->b_flags |= B_ERROR;
                iodone(bp);
            }
            unlock_enable(s, &afs_asyncbuf_lock);
            AFS_GLOCK();
            continue;
        }
        vcp = VTOAFS(bp->b_vp);
        if (bp->b_flags & B_PFSTORE) { /* XXXX */
            ObtainWriteLock(&vcp->lock, 404);
            if (vcp->v.v_gnode->gn_mwrcnt) {
                afs_offs_t newlength =
                    (afs_offs_t) dbtob(bp->b_blkno) + bp->b_bcount;
                if (vcp->f.m.Length < newlength) {
                    afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
                               ICL_TYPE_STRING, __FILE__, ICL_TYPE_LONG,
                               __LINE__, ICL_TYPE_OFFSET,
                               ICL_HANDLE_OFFSET(vcp->f.m.Length),
                               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(newlength));
                    vcp->f.m.Length = newlength;
                }
            }
            ReleaseWriteLock(&vcp->lock);
        }
        /* If the buffer represents a protection violation, rather than
         * an actual request for I/O, no special action need be taken.
         */
        if (bp->b_flags & B_PFPROT) {
            iodone(bp); /* Notify all users of the buffer that we're done */
            clrjmpx(&jmpbuf);
            continue;
        }
        if (DOvmlock)
            ObtainWriteLock(&vcp->pvmlock, 211);
        /*
         * First map its data area to a region in the current address space
         * by calling vm_att with the subspace identifier, and a pointer to
         * the data area. vm_att returns a new data area pointer, but we
         * also want to hang onto the old one.
         */
        tmpaddr = bp->b_baddr;
        bp->b_baddr = (caddr_t) vm_att(bp->b_xmemd.subspace_id, tmpaddr);
        tmperr = afs_ustrategy(bp); /* temp variable saves offset calculation */
        if (tmperr) { /* in the error case */
            bp->b_flags |= B_ERROR; /* should other flags remain set ??? */
            bp->b_error = tmperr;
        }

        /* Unmap the buffer's data area by calling vm_det. Reset data area
         * to the value that we saved above.
         */
        vm_det(bp->b_baddr);
        bp->b_baddr = tmpaddr;

        /*
         * buffer may be linked with other buffers via the b_work field.
         * See also afs_gn_strategy. For each buffer in the chain (including
         * bp) notify all users of the buffer that the daemon is finished
         * using it by calling iodone.
         * assumes iodone can modify the b_work field.
         */
        for (tbp1 = bp;;) {
            tbp2 = (struct buf *)tbp1->b_work;
            iodone(tbp1);
            if (!tbp2)
                break;

            tbp1 = (struct buf *)tbp2->b_work;
            iodone(tbp2);
            if (!tbp1)
                break;
        }
        if (DOvmlock)
            ReleaseWriteLock(&vcp->pvmlock); /* Unlock the vnode. */
        clrjmpx(&jmpbuf);
    } /* infinite loop (unless we're interrupted) */
} /* end of afs_BioDaemon() */

#endif /* AFS_AIX41_ENV */


int afs_nbrs = 0;
static_inline void
afs_BackgroundDaemon_once(void)
{
    LOCK_INIT(&afs_xbrs, "afs_xbrs");
    memset(afs_brs, 0, sizeof(afs_brs));
    brsInit = 1;
#if defined (AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
    /*
     * steal the first daemon for doing delayed DSlot flushing
     * (see afs_GetDownDSlot)
     */
    AFS_GUNLOCK();
    afs_sgidaemon();
    exit(CLD_EXITED, 0);
#endif
}

static_inline void
brequest_release(struct brequest *tb)
{
    if (tb->vc) {
        AFS_RELE(AFSTOV(tb->vc)); /* MUST call vnode layer or could lose vnodes */
        tb->vc = NULL;
    }
    if (tb->cred) {
        crfree(tb->cred);
        tb->cred = (afs_ucred_t *)0;
    }
    afs_BRelease(tb); /* this grabs and releases afs_xbrs lock */
}

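/*
 * afs_BackgroundDaemon: body of one background daemon. Each daemon scans
 * afs_brs under afs_xbrs for the queued request with the smallest
 * timestamp that no one has started yet, dispatches it by opcode, and
 * sleeps on afs_brsDaemons when the queue is empty. In the AFS_NEW_BKG
 * build the daemon runs as a re-entering syscall: a BOP_MOVE request is
 * copied out to userspace, and the next entry with uspc->ts >= 0 delivers
 * that request's result before the daemon resumes waiting.
 */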
#ifdef AFS_NEW_BKG
int
afs_BackgroundDaemon(struct afs_uspc_param *uspc, void *param1, void *param2)
#else
void
afs_BackgroundDaemon(void)
#endif
{
    struct brequest *tb;
    int i, foundAny;

    AFS_STATCNT(afs_BackgroundDaemon);
    /* initialize subsystem */
    if (brsInit == 0)
        /* Irix with "short stack" exits */
        afs_BackgroundDaemon_once();

#ifdef AFS_NEW_BKG
    /* If it's a re-entering syscall, complete the request and release */
    if (uspc->ts > -1) {
        tb = afs_brs;
        for (i = 0; i < NBRS; i++, tb++) {
            if (tb->ts == uspc->ts) {
                /* copy the userspace status back in */
                ((struct afs_uspc_param *) tb->ptr_parm[0])->retval =
                    uspc->retval;
                /* mark it valid and notify our caller */
                tb->flags |= BUVALID;
                if (tb->flags & BUWAIT) {
                    tb->flags &= ~BUWAIT;
                    afs_osi_Wakeup(tb);
                }
                brequest_release(tb);
                break;
            }
        }
    } else {
        afs_osi_MaskUserLoop();
#endif
        /* Otherwise it's a new one */
        afs_nbrs++;
#ifdef AFS_NEW_BKG
    }
#endif

    ObtainWriteLock(&afs_xbrs, 302);
    while (1) {
        int min_ts = 0;
        struct brequest *min_tb = NULL;

        if (afs_termState == AFSOP_STOP_BKG) {
            if (--afs_nbrs <= 0)
                afs_termState = AFSOP_STOP_RXCALLBACK;
            ReleaseWriteLock(&afs_xbrs);
            afs_osi_Wakeup(&afs_termState);
#ifdef AFS_NEW_BKG
            return -2;
#else
            return;
#endif
        }

        /* find a request */
        tb = afs_brs;
        foundAny = 0;
        for (i = 0; i < NBRS; i++, tb++) {
            /* look for request with smallest ts */
            if ((tb->refCount > 0) && !(tb->flags & BSTARTED)) {
                /* new request, not yet picked up */
                if ((min_tb && (min_ts - tb->ts > 0)) || !min_tb) {
                    min_tb = tb;
                    min_ts = tb->ts;
                }
            }
        }
        if ((tb = min_tb)) {
            /* claim and process this request */
            tb->flags |= BSTARTED;
            ReleaseWriteLock(&afs_xbrs);
            foundAny = 1;
            afs_Trace1(afs_iclSetp, CM_TRACE_BKG1, ICL_TYPE_INT32,
                       tb->opcode);
            if (tb->opcode == BOP_FETCH)
                BPrefetch(tb);
#if defined(AFS_CACHE_BYPASS)
            else if (tb->opcode == BOP_FETCH_NOCACHE)
                BPrefetchNoCache(tb);
#endif
            else if (tb->opcode == BOP_STORE)
                BStore(tb);
            else if (tb->opcode == BOP_PATH)
                BPath(tb);
#ifdef AFS_DARWIN80_ENV
            else if (tb->opcode == BOP_MOVE) {
                memcpy(uspc, (struct afs_uspc_param *) tb->ptr_parm[0],
                       sizeof(struct afs_uspc_param));
                uspc->ts = tb->ts;
                /* string lengths capped in move vop; copy NUL tho */
                memcpy(param1, (char *)tb->ptr_parm[1],
                       strlen(tb->ptr_parm[1])+1);
                memcpy(param2, (char *)tb->ptr_parm[2],
                       strlen(tb->ptr_parm[2])+1);
                return 0;
            }
#endif
            else if (tb->opcode == BOP_PARTIAL_STORE)
                BPartialStore(tb);
            else
                panic("background bop");
            brequest_release(tb);
            ObtainWriteLock(&afs_xbrs, 305);
        }
        if (!foundAny) {
            /* wait for new request */
            afs_brsDaemons++;
            ReleaseWriteLock(&afs_xbrs);
            afs_osi_Sleep(&afs_brsDaemons);
            ObtainWriteLock(&afs_xbrs, 307);
            afs_brsDaemons--;
        }
    }
#ifdef AFS_NEW_BKG
    return -2;
#endif
}


void
shutdown_daemons(void)
{
    AFS_STATCNT(shutdown_daemons);
    if (afs_cold_shutdown) {
        afs_brsDaemons = brsInit = 0;
        afs_nbrs = 0;
        memset(afs_brs, 0, sizeof(afs_brs));
        memset(&afs_xbrs, 0, sizeof(afs_lock_t));
        afs_brsWaiters = 0;
#ifdef AFS_AIX41_ENV
        lock_free(&afs_asyncbuf_lock);
        unpin(&afs_asyncbuf, sizeof(struct buf *));
        unpin(&afs_asyncbuf_cv, sizeof(afs_int32));
        afs_initbiod = 0;
#endif
    }
}

#if defined(AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
/*
 * sgi - daemon - handles certain operations that otherwise
 * would use up too much kernel stack space
 *
 * This all assumes that since the caller must have the xdcache lock
 * exclusively that the list will never be more than one long
 * and no one else can attempt to add anything until we're done.
 */
SV_TYPE afs_sgibksync;
SV_TYPE afs_sgibkwait;
lock_t afs_sgibklock;
struct dcache *afs_sgibklist;

int
afs_sgidaemon(void)
{
    int s;
    struct dcache *tdc;

    if (afs_sgibklock == NULL) {
        SV_INIT(&afs_sgibksync, "bksync", 0, 0);
        SV_INIT(&afs_sgibkwait, "bkwait", 0, 0);
        SPINLOCK_INIT(&afs_sgibklock, "bklock");
    }
    s = SPLOCK(afs_sgibklock);
    for (;;) {
        /* wait for something to do */
        SP_WAIT(afs_sgibklock, s, &afs_sgibksync, PINOD);
        osi_Assert(afs_sgibklist);

        /* XX will probably need to generalize to real list someday */
        s = SPLOCK(afs_sgibklock);
        while (afs_sgibklist) {
            tdc = afs_sgibklist;
            afs_sgibklist = NULL;
            SPUNLOCK(afs_sgibklock, s);
            AFS_GLOCK();
            tdc->dflags &= ~DFEntryMod;
            osi_Assert(afs_WriteDCache(tdc, 1) == 0);
            AFS_GUNLOCK();
            s = SPLOCK(afs_sgibklock);
        }

        /* done all the work - wake everyone up */
        while (SV_SIGNAL(&afs_sgibkwait));
    }
}
#endif