Commit | Line | Data |
---|---|---|
805e021f CE |
1 | /* |
2 | * Copyright 2000, International Business Machines Corporation and others. | |
3 | * All Rights Reserved. | |
4 | * | |
5 | * This software has been released under the terms of the IBM Public | |
6 | * License. For details, see the LICENSE file in the top-level source | |
7 | * directory or online at http://www.openafs.org/dl/license10.html | |
8 | */ | |
9 | ||
10 | /* | |
11 | * Description: | |
12 | * Test of the xstat_cm module. | |
13 | * | |
14 | *------------------------------------------------------------------------*/ | |
15 | ||
16 | #include <afsconfig.h> | |
17 | #include <afs/param.h> | |
18 | ||
19 | #include <roken.h> | |
20 | ||
21 | #include "xstat_cm.h" /*Interface for xstat_cm module */ | |
22 | #include <afs/cmd.h> /*Command line interpreter */ | |
23 | #include <afs/afsutil.h> | |
24 | ||
/*
 * Command line parameter indices.
 * P_CM_NAMES  : List of CacheManager names.
 * P_COLL_IDS  : List of collection IDs to pick up.
 * P_ONESHOT   : Are we gathering exactly one round of data?
 * P_FREQUENCY : Probe frequency, in seconds (defaults to 30 in RunTheTest).
 * P_PERIOD    : Length of the data collection, in minutes (defaults to 10).
 * P_DEBUG     : Enable debugging output?
 */
#define P_CM_NAMES 0
#define P_COLL_IDS 1
#define P_ONESHOT 2
#define P_FREQUENCY 3
#define P_PERIOD 4
#define P_DEBUG 5
38 | ||
/*
 * Private globals, set from the command line switches before the
 * xstat_cm service is started.
 */
static int debugging_on = 0;	/*Are we debugging? (P_DEBUG given) */
static int one_shot = 0;	/*Single round of data collection? (P_ONESHOT given) */
44 | ||
/*
 * Printable names for the File Server RPC opcodes, indexed by op
 * number; used with the fsRPCTimes/fsRPCErrors arrays in
 * PrintRPCPerfInfo().
 */
static char *fsOpNames[] = {
    "FetchData",
    "FetchACL",
    "FetchStatus",
    "StoreData",
    "StoreACL",
    "StoreStatus",
    "RemoveFile",
    "CreateFile",
    "Rename",
    "Symlink",
    "Link",
    "MakeDir",
    "RemoveDir",
    "SetLock",
    "ExtendLock",
    "ReleaseLock",
    "GetStatistics",
    "GiveUpCallbacks",
    "GetVolumeInfo",
    "GetVolumeStatus",
    "SetVolumeStatus",
    "GetRootVolume",
    "CheckToken",
    "GetTime",
    "NGetVolumeInfo",
    "BulkStatus",
    "XStatsVersion",
    "GetXStats",
    "XLookup",
    "ResidencyRpc"
};
77 | ||
/*
 * Printable names for the Cache Manager RPC opcodes, indexed by op
 * number; used with the cmRPCTimes array in PrintRPCPerfInfo().
 */
static char *cmOpNames[] = {
    "CallBack",
    "InitCallBackState",
    "Probe",
    "GetLock",
    "GetCE",
    "XStatsVersion",
    "GetXStats"
};
87 | ||
/*
 * Printable names for the data-transfer operations, indexed by op
 * number; used with the fsXferTimes array in PrintRPCPerfInfo().
 */
static char *xferOpNames[] = {
    "FetchData",
    "StoreData"
};
92 | ||
93 | ||
/* Print detailed functional call statistics */

void
print_cmCallStats(void)
{
    char *printableTime;	/*Ptr to printable time string */
    afs_int32 nitems;		/*# of counter words still unread in the probe data */
    struct afs_CMStats *cmp;	/*Probe data, viewed as the CM call-count struct */
    time_t probeTime = xstat_cm_Results.probeTime;

    /* ctime()'s result ends with '\n'; strip it for inline printing. */
    printableTime = ctime(&probeTime);
    printableTime[strlen(printableTime) - 1] = '\0';

    printf
	("AFSCB_XSTATSCOLL_CALL_INFO (coll %d) for CM %s\n[Probe %u, %s]\n\n",
	 xstat_cm_Results.collectionNumber, xstat_cm_Results.connP->hostName,
	 xstat_cm_Results.probeNum, printableTime);

    cmp = (struct afs_CMStats *)(xstat_cm_Results.data.AFSCB_CollData_val);
    nitems = xstat_cm_Results.data.AFSCB_CollData_len;

    /*
     * AFS_CM_CALL_STATS (from the project headers) expands to one
     * AFS_CS(op) invocation per call counter.  Define AFS_CS to print
     * the counter together with its stringized name, while the nitems
     * guard ensures we never read more words than the probe returned
     * (e.g. when talking to an older Cache Manager).
     */
#define AFS_CS(call) \
    if (nitems > 0) { \
	printf("\t%10u %s\n", cmp->callInfo.C_ ## call, #call); \
	nitems--; \
    }

    AFS_CM_CALL_STATS
#undef AFS_CS
}
124 | ||
125 | ||
126 | /*------------------------------------------------------------------------ | |
127 | * PrintUpDownStats | |
128 | * | |
129 | * Description: | |
130 | * Print the up/downtime stats for the given class of server records | |
131 | * provided. | |
132 | * | |
133 | * Arguments: | |
134 | * a_upDownP : Ptr to the server up/down info. | |
135 | * | |
136 | * Returns: | |
137 | * Nothing. | |
138 | * | |
139 | * Environment: | |
140 | * Nothing interesting. | |
141 | * | |
142 | * Side Effects: | |
143 | * As advertised. | |
144 | *------------------------------------------------------------------------*/ | |
145 | ||
void
PrintUpDownStats(struct afs_stats_SrvUpDownInfo *a_upDownP)
{				/*PrintUpDownStats */

    /*
     * First, print the simple values.
     * NOTE(review): record ages and downtimes appear to be in seconds —
     * confirm against the afs_stats_SrvUpDownInfo declaration.
     */
    printf("\t\t%10u numTtlRecords\n", a_upDownP->numTtlRecords);
    printf("\t\t%10u numUpRecords\n", a_upDownP->numUpRecords);
    printf("\t\t%10u numDownRecords\n", a_upDownP->numDownRecords);
    printf("\t\t%10u sumOfRecordAges\n", a_upDownP->sumOfRecordAges);
    printf("\t\t%10u ageOfYoungestRecord\n", a_upDownP->ageOfYoungestRecord);
    printf("\t\t%10u ageOfOldestRecord\n", a_upDownP->ageOfOldestRecord);
    printf("\t\t%10u numDowntimeIncidents\n",
	   a_upDownP->numDowntimeIncidents);
    printf("\t\t%10u numRecordsNeverDown\n", a_upDownP->numRecordsNeverDown);
    printf("\t\t%10u maxDowntimesInARecord\n",
	   a_upDownP->maxDowntimesInARecord);
    printf("\t\t%10u sumOfDowntimes\n", a_upDownP->sumOfDowntimes);
    printf("\t\t%10u shortestDowntime\n", a_upDownP->shortestDowntime);
    printf("\t\t%10u longestDowntime\n", a_upDownP->longestDowntime);

    /*
     * Now, print the array values: histograms of downtime durations
     * and of per-record downtime incident counts.
     */
    printf("\t\tDowntime duration distribution:\n");
    printf("\t\t\t%8u: 0 min .. 10 min\n", a_upDownP->downDurations[0]);
    printf("\t\t\t%8u: 10 min .. 30 min\n", a_upDownP->downDurations[1]);
    printf("\t\t\t%8u: 30 min .. 1 hr\n", a_upDownP->downDurations[2]);
    printf("\t\t\t%8u: 1 hr .. 2 hr\n", a_upDownP->downDurations[3]);
    printf("\t\t\t%8u: 2 hr .. 4 hr\n", a_upDownP->downDurations[4]);
    printf("\t\t\t%8u: 4 hr .. 8 hr\n", a_upDownP->downDurations[5]);
    printf("\t\t\t%8u: > 8 hr\n", a_upDownP->downDurations[6]);

    printf("\t\tDowntime incident distribution:\n");
    printf("\t\t\t%8u: 0 times\n", a_upDownP->downIncidents[0]);
    printf("\t\t\t%8u: 1 time\n", a_upDownP->downIncidents[1]);
    printf("\t\t\t%8u: 2 .. 5 times\n", a_upDownP->downIncidents[2]);
    printf("\t\t\t%8u: 6 .. 10 times\n", a_upDownP->downIncidents[3]);
    printf("\t\t\t%8u: 10 .. 50 times\n", a_upDownP->downIncidents[4]);
    printf("\t\t\t%8u: > 50 times\n", a_upDownP->downIncidents[5]);

}				/*PrintUpDownStats */
189 | ||
190 | ||
191 | /*------------------------------------------------------------------------ | |
192 | * PrintOverallPerfInfo | |
193 | * | |
194 | * Description: | |
195 | * Print out overall performance numbers. | |
196 | * | |
197 | * Arguments: | |
198 | * a_ovP : Ptr to the overall performance numbers. | |
199 | * | |
200 | * Returns: | |
201 | * Nothing. | |
202 | * | |
203 | * Environment: | |
204 | * All the info we need is nestled into xstat_cm_Results. | |
205 | * | |
206 | * Side Effects: | |
207 | * As advertised. | |
208 | *------------------------------------------------------------------------*/ | |
209 | ||
void
PrintOverallPerfInfo(struct afs_stats_CMPerf *a_ovP)
{				/*PrintOverallPerfInfo */

    printf("\t%10u numPerfCalls\n", a_ovP->numPerfCalls);

    /* Cell visibility and local/remote access counters. */
    printf("\t%10u epoch\n", a_ovP->epoch);
    printf("\t%10u numCellsVisible\n", a_ovP->numCellsVisible);
    printf("\t%10u numCellsContacted\n", a_ovP->numCellsContacted);
    printf("\t%10u dlocalAccesses\n", a_ovP->dlocalAccesses);
    printf("\t%10u vlocalAccesses\n", a_ovP->vlocalAccesses);
    printf("\t%10u dremoteAccesses\n", a_ovP->dremoteAccesses);
    printf("\t%10u vremoteAccesses\n", a_ovP->vremoteAccesses);
    /* Data-cache (dcache) and vnode-cache (vcache) statistics. */
    printf("\t%10u cacheNumEntries\n", a_ovP->cacheNumEntries);
    printf("\t%10u cacheBlocksTotal\n", a_ovP->cacheBlocksTotal);
    printf("\t%10u cacheBlocksInUse\n", a_ovP->cacheBlocksInUse);
    printf("\t%10u cacheBlocksOrig\n", a_ovP->cacheBlocksOrig);
    printf("\t%10u cacheMaxDirtyChunks\n", a_ovP->cacheMaxDirtyChunks);
    printf("\t%10u cacheCurrDirtyChunks\n", a_ovP->cacheCurrDirtyChunks);
    printf("\t%10u dcacheHits\n", a_ovP->dcacheHits);
    printf("\t%10u vcacheHits\n", a_ovP->vcacheHits);
    printf("\t%10u dcacheMisses\n", a_ovP->dcacheMisses);
    printf("\t%10u vcacheMisses\n", a_ovP->vcacheMisses);
    printf("\t%10u cacheFilesReused\n", a_ovP->cacheFilesReused);
    printf("\t%10u vcacheXAllocs\n", a_ovP->vcacheXAllocs);
    printf("\t%10u dcacheXAllocs\n", a_ovP->dcacheXAllocs);

    /* Buffer package counters. */
    printf("\t%10u bufAlloced\n", a_ovP->bufAlloced);
    printf("\t%10u bufHits\n", a_ovP->bufHits);
    printf("\t%10u bufMisses\n", a_ovP->bufMisses);
    printf("\t%10u bufFlushDirty\n", a_ovP->bufFlushDirty);

    /* Memory allocation and callback bookkeeping. */
    printf("\t%10u LargeBlocksActive\n", a_ovP->LargeBlocksActive);
    printf("\t%10u LargeBlocksAlloced\n", a_ovP->LargeBlocksAlloced);
    printf("\t%10u SmallBlocksActive\n", a_ovP->SmallBlocksActive);
    printf("\t%10u SmallBlocksAlloced\n", a_ovP->SmallBlocksAlloced);
    printf("\t%10u OutStandingMemUsage\n", a_ovP->OutStandingMemUsage);
    printf("\t%10u OutStandingAllocs\n", a_ovP->OutStandingAllocs);
    printf("\t%10u CallBackAlloced\n", a_ovP->CallBackAlloced);
    printf("\t%10u CallBackFlushes\n", a_ovP->CallBackFlushes);
    /* Note: label "CallBackLoops" intentionally prints the cbloops field. */
    printf("\t%10u CallBackLoops\n", a_ovP->cbloops);

    /* Server-record hash table statistics. */
    printf("\t%10u srvRecords\n", a_ovP->srvRecords);
    printf("\t%10u srvNumBuckets\n", a_ovP->srvNumBuckets);
    printf("\t%10u srvMaxChainLength\n", a_ovP->srvMaxChainLength);
    printf("\t%10u srvMaxChainLengthHWM\n", a_ovP->srvMaxChainLengthHWM);
    printf("\t%10u srvRecordsHWM\n", a_ovP->srvRecordsHWM);

    printf("\t%10u cacheBucket0_Discarded\n", a_ovP->cacheBucket0_Discarded);
    printf("\t%10u cacheBucket1_Discarded\n", a_ovP->cacheBucket1_Discarded);
    printf("\t%10u cacheBucket2_Discarded\n", a_ovP->cacheBucket2_Discarded);

    printf("\t%10u sysName_ID\n", a_ovP->sysName_ID);

    /*
     * Up/downtime summaries: index 0 is the same cell, index 1 is
     * different cells, for both File Servers and VL Servers.
     */
    printf("\tFile Server up/downtimes, same cell:\n");
    PrintUpDownStats(&(a_ovP->fs_UpDown[0]));

    printf("\tFile Server up/downtimes, diff cell:\n");
    PrintUpDownStats(&(a_ovP->fs_UpDown[1]));

    printf("\tVL Server up/downtimes, same cell:\n");
    PrintUpDownStats(&(a_ovP->vl_UpDown[0]));

    printf("\tVL Server up/downtimes, diff cell:\n");
    PrintUpDownStats(&(a_ovP->vl_UpDown[1]));

}				/*PrintOverallPerfInfo */
277 | ||
278 | ||
279 | /*------------------------------------------------------------------------ | |
280 | * PrintPerfInfo | |
281 | * | |
282 | * Description: | |
283 | * Print out the AFSCB_XSTATSCOLL_PERF_INFO collection we just | |
284 | * received. | |
285 | * | |
286 | * Arguments: | |
287 | * None. | |
288 | * | |
289 | * Returns: | |
290 | * Nothing. | |
291 | * | |
292 | * Environment: | |
293 | * All the info we need is nestled into xstat_cm_Results. | |
294 | * | |
295 | * Side Effects: | |
296 | * As advertised. | |
297 | *------------------------------------------------------------------------*/ | |
298 | ||
299 | void | |
300 | PrintPerfInfo(void) | |
301 | { /*PrintPerfInfo */ | |
302 | ||
303 | static afs_int32 perfInt32s = (sizeof(struct afs_stats_CMPerf) >> 2); /*Correct # int32s to rcv */ | |
304 | afs_int32 numInt32s; /*# int32words received */ | |
305 | struct afs_stats_CMPerf *perfP; /*Ptr to performance stats */ | |
306 | char *printableTime; /*Ptr to printable time string */ | |
307 | time_t probeTime = xstat_cm_Results.probeTime; | |
308 | ||
309 | numInt32s = xstat_cm_Results.data.AFSCB_CollData_len; | |
310 | if (numInt32s != perfInt32s) { | |
311 | printf("** Data size mismatch in performance collection!"); | |
312 | printf("** Expecting %u, got %u\n", perfInt32s, numInt32s); | |
313 | printf("** Version mismatch with Cache Manager\n"); | |
314 | return; | |
315 | } | |
316 | ||
317 | printableTime = ctime(&probeTime); | |
318 | printableTime[strlen(printableTime) - 1] = '\0'; | |
319 | perfP = (struct afs_stats_CMPerf *) | |
320 | (xstat_cm_Results.data.AFSCB_CollData_val); | |
321 | ||
322 | printf | |
323 | ("AFSCB_XSTATSCOLL_PERF_INFO (coll %d) for CM %s\n[Probe %u, %s]\n\n", | |
324 | xstat_cm_Results.collectionNumber, xstat_cm_Results.connP->hostName, | |
325 | xstat_cm_Results.probeNum, printableTime); | |
326 | ||
327 | PrintOverallPerfInfo(perfP); | |
328 | ||
329 | } /*PrintPerfInfo */ | |
330 | ||
331 | ||
332 | /*------------------------------------------------------------------------ | |
333 | * PrintOpTiming | |
334 | * | |
335 | * Description: | |
336 | * Print out the contents of an FS RPC op timing structure. | |
337 | * | |
338 | * Arguments: | |
339 | * a_opIdx : Index of the AFS operation we're printing number on. | |
340 | * a_opNames : Ptr to table of operaton names. | |
341 | * a_opTimeP : Ptr to the op timing structure to print. | |
342 | * | |
343 | * Returns: | |
344 | * Nothing. | |
345 | * | |
346 | * Environment: | |
347 | * Nothing interesting. | |
348 | * | |
349 | * Side Effects: | |
350 | * As advertised. | |
351 | *------------------------------------------------------------------------*/ | |
352 | ||
void
PrintOpTiming(int a_opIdx, char *a_opNames[],
	      struct afs_stats_opTimingData *a_opTimeP)
{				/*PrintOpTiming */

    /*
     * One line per op: call counts plus the sum, sum-of-squares,
     * minimum and maximum elapsed times (as seconds.microseconds).
     * NOTE(review): the values are cast to (long) but printed with
     * %lu; %ld would match the casts exactly — harmless for these
     * non-negative timings, but worth confirming.
     */
    printf
	("%15s: %u ops (%u OK); sum=%lu.%06lu, sqr=%lu.%06lu, min=%lu.%06lu, max=%lu.%06lu\n",
	 a_opNames[a_opIdx], a_opTimeP->numOps, a_opTimeP->numSuccesses,
	 (long)a_opTimeP->sumTime.tv_sec, (long)a_opTimeP->sumTime.tv_usec,
	 (long)a_opTimeP->sqrTime.tv_sec, (long)a_opTimeP->sqrTime.tv_usec,
	 (long)a_opTimeP->minTime.tv_sec, (long)a_opTimeP->minTime.tv_usec,
	 (long)a_opTimeP->maxTime.tv_sec, (long)a_opTimeP->maxTime.tv_usec);

}				/*PrintOpTiming */
367 | ||
368 | ||
369 | /*------------------------------------------------------------------------ | |
370 | * PrintXferTiming | |
371 | * | |
372 | * Description: | |
373 | * Print out the contents of a data transfer structure. | |
374 | * | |
375 | * Arguments: | |
376 | * a_opIdx : Index of the AFS operation we're printing number on. | |
377 | * a_opNames : Ptr to table of operation names. | |
378 | * a_xferP : Ptr to the data transfer structure to print. | |
379 | * | |
380 | * Returns: | |
381 | * Nothing. | |
382 | * | |
383 | * Environment: | |
384 | * Nothing interesting. | |
385 | * | |
386 | * Side Effects: | |
387 | * As advertised. | |
388 | *------------------------------------------------------------------------*/ | |
389 | ||
void
PrintXferTiming(int a_opIdx, char *a_opNames[],
		struct afs_stats_xferData *a_xferP)
{				/*PrintXferTiming */

    /*
     * First line: transfer counts and the sum, sum-of-squares, min and
     * max of the transfer times (seconds.microseconds).
     */
    printf
	("%s: %u xfers (%u OK), time sum=%lu.%06lu, sqr=%lu.%06lu, min=%lu.%06lu, max=%lu.%06lu\n",
	 a_opNames[a_opIdx], a_xferP->numXfers, a_xferP->numSuccesses,
	 (long)a_xferP->sumTime.tv_sec, (long)a_xferP->sumTime.tv_usec,
	 (long)a_xferP->sqrTime.tv_sec, (long)a_xferP->sqrTime.tv_usec,
	 (long)a_xferP->minTime.tv_sec, (long)a_xferP->minTime.tv_usec,
	 (long)a_xferP->maxTime.tv_sec, (long)a_xferP->maxTime.tv_usec);
    /* Byte totals, then the 9-bucket transfer-size histogram. */
    printf("\t[bytes: sum=%u, min=%u, max=%u]\n", a_xferP->sumBytes,
	   a_xferP->minBytes, a_xferP->maxBytes);
    printf
	("\t[buckets: 0: %u, 1: %u, 2: %u, 3: %u, 4: %u, 5: %u, 6: %u, 7: %u, 8: %u]\n",
	 a_xferP->count[0], a_xferP->count[1], a_xferP->count[2],
	 a_xferP->count[3], a_xferP->count[4], a_xferP->count[5],
	 a_xferP->count[6], a_xferP->count[7], a_xferP->count[8]);


}				/*PrintXferTiming */
412 | ||
413 | ||
414 | /*------------------------------------------------------------------------ | |
415 | * PrintErrInfo | |
416 | * | |
417 | * Description: | |
418 | * Print out the contents of an FS RPC error info structure. | |
419 | * | |
420 | * Arguments: | |
421 | * a_opIdx : Index of the AFS operation we're printing. | |
422 | * a_opNames : Ptr to table of operation names. | |
423 | * a_opErrP : Ptr to the op timing structure to print. | |
424 | * | |
425 | * Returns: | |
426 | * Nothing. | |
427 | * | |
428 | * Environment: | |
429 | * Nothing interesting. | |
430 | * | |
431 | * Side Effects: | |
432 | * As advertised. | |
433 | *------------------------------------------------------------------------*/ | |
434 | ||
void
PrintErrInfo(int a_opIdx, char *a_opNames[],
	     struct afs_stats_RPCErrors *a_opErrP)
{				/*PrintErrInfo */

    /* One line per op: error counts broken down by category. */
    printf
	("%15s: %u server, %u network, %u prot, %u vol, %u busies, %u other\n",
	 a_opNames[a_opIdx], a_opErrP->err_Server, a_opErrP->err_Network,
	 a_opErrP->err_Protection, a_opErrP->err_Volume,
	 a_opErrP->err_VolumeBusies, a_opErrP->err_Other);

}				/*PrintErrInfo */
447 | ||
448 | ||
449 | /*------------------------------------------------------------------------ | |
450 | * PrintRPCPerfInfo | |
451 | * | |
452 | * Description: | |
453 | * Print out a set of RPC performance numbers. | |
454 | * | |
455 | * Arguments: | |
456 | * a_rpcP : Ptr to RPC perf numbers to print. | |
457 | * | |
458 | * Returns: | |
459 | * Nothing. | |
460 | * | |
461 | * Environment: | |
462 | * Nothing interesting. | |
463 | * | |
464 | * Side Effects: | |
465 | * As advertised. | |
466 | *------------------------------------------------------------------------*/ | |
467 | ||
468 | void | |
469 | PrintRPCPerfInfo(struct afs_stats_RPCOpInfo *a_rpcP) | |
470 | { /*PrintRPCPerfInfo */ | |
471 | ||
472 | int currIdx; /*Loop variable */ | |
473 | ||
474 | /* | |
475 | * Print the contents of each of the opcode-related arrays. | |
476 | */ | |
477 | printf("FS Operation Timings:\n---------------------\n"); | |
478 | for (currIdx = 0; currIdx < AFS_STATS_NUM_FS_RPC_OPS; currIdx++) | |
479 | PrintOpTiming(currIdx, fsOpNames, &(a_rpcP->fsRPCTimes[currIdx])); | |
480 | ||
481 | printf("\nError Info:\n-----------\n"); | |
482 | for (currIdx = 0; currIdx < AFS_STATS_NUM_FS_RPC_OPS; currIdx++) | |
483 | PrintErrInfo(currIdx, fsOpNames, &(a_rpcP->fsRPCErrors[currIdx])); | |
484 | ||
485 | printf("\nTransfer timings:\n-----------------\n"); | |
486 | for (currIdx = 0; currIdx < AFS_STATS_NUM_FS_XFER_OPS; currIdx++) | |
487 | PrintXferTiming(currIdx, xferOpNames, | |
488 | &(a_rpcP->fsXferTimes[currIdx])); | |
489 | ||
490 | printf("\nCM Operation Timings:\n---------------------\n"); | |
491 | for (currIdx = 0; currIdx < AFS_STATS_NUM_CM_RPC_OPS; currIdx++) | |
492 | PrintOpTiming(currIdx, cmOpNames, &(a_rpcP->cmRPCTimes[currIdx])); | |
493 | ||
494 | } /*PrintRPCPerfInfo */ | |
495 | ||
496 | ||
497 | /*------------------------------------------------------------------------ | |
498 | * PrintFullPerfInfo | |
499 | * | |
500 | * Description: | |
501 | * Print out a set of full performance numbers. | |
502 | * | |
503 | * Arguments: | |
504 | * None. | |
505 | * | |
506 | * Returns: | |
507 | * Nothing. | |
508 | * | |
509 | * Environment: | |
510 | * Nothing interesting. | |
511 | * | |
512 | * Side Effects: | |
513 | * As advertised. | |
514 | *------------------------------------------------------------------------*/ | |
515 | ||
516 | void | |
517 | PrintFullPerfInfo(void) | |
518 | { /*PrintFullPerfInfo */ | |
519 | ||
520 | struct afs_stats_AuthentInfo *authentP; /*Ptr to authentication stats */ | |
521 | struct afs_stats_AccessInfo *accessinfP; /*Ptr to access stats */ | |
522 | static afs_int32 fullPerfInt32s = (sizeof(struct afs_stats_CMFullPerf) >> 2); /*Correct #int32s */ | |
523 | afs_int32 numInt32s; /*# int32s actually received */ | |
524 | struct afs_stats_CMFullPerf *fullP; /*Ptr to full perf info */ | |
525 | ||
526 | char *printableTime; /*Ptr to printable time string */ | |
527 | time_t probeTime = xstat_cm_Results.probeTime; | |
528 | ||
529 | numInt32s = xstat_cm_Results.data.AFSCB_CollData_len; | |
530 | if (numInt32s != fullPerfInt32s) { | |
531 | printf("** Data size mismatch in performance collection!"); | |
532 | printf("** Expecting %u, got %u\n", fullPerfInt32s, numInt32s); | |
533 | printf("** Version mismatch with Cache Manager\n"); | |
534 | return; | |
535 | } | |
536 | ||
537 | printableTime = ctime(&probeTime); | |
538 | printableTime[strlen(printableTime) - 1] = '\0'; | |
539 | fullP = (struct afs_stats_CMFullPerf *) | |
540 | (xstat_cm_Results.data.AFSCB_CollData_val); | |
541 | ||
542 | printf | |
543 | ("AFSCB_XSTATSCOLL_FULL_PERF_INFO (coll %d) for CM %s\n[Probe %u, %s]\n\n", | |
544 | xstat_cm_Results.collectionNumber, xstat_cm_Results.connP->hostName, | |
545 | xstat_cm_Results.probeNum, printableTime); | |
546 | ||
547 | /* | |
548 | * Print the overall numbers first, followed by all of the RPC numbers, | |
549 | * then each of the other groupings. | |
550 | */ | |
551 | printf("Overall Performance Info:\n-------------------------\n"); | |
552 | PrintOverallPerfInfo(&(fullP->perf)); | |
553 | printf("\n"); | |
554 | PrintRPCPerfInfo(&(fullP->rpc)); | |
555 | ||
556 | authentP = &(fullP->authent); | |
557 | printf("\nAuthentication info:\n--------------------\n"); | |
558 | printf | |
559 | ("\t%u PAGS, %u records (%u auth, %u unauth), %u max in PAG, chain max: %u\n", | |
560 | authentP->curr_PAGs, authentP->curr_Records, | |
561 | authentP->curr_AuthRecords, authentP->curr_UnauthRecords, | |
562 | authentP->curr_MaxRecordsInPAG, authentP->curr_LongestChain); | |
563 | printf("\t%u PAG creations, %u tkt updates\n", authentP->PAGCreations, | |
564 | authentP->TicketUpdates); | |
565 | printf("\t[HWMs: %u PAGS, %u records, %u max in PAG, chain max: %u]\n", | |
566 | authentP->HWM_PAGs, authentP->HWM_Records, | |
567 | authentP->HWM_MaxRecordsInPAG, authentP->HWM_LongestChain); | |
568 | ||
569 | accessinfP = &(fullP->accessinf); | |
570 | printf("\n[Un]replicated accesses:\n------------------------\n"); | |
571 | printf | |
572 | ("\t%u unrep, %u rep, %u reps accessed, %u max reps/ref, %u first OK\n\n", | |
573 | accessinfP->unreplicatedRefs, accessinfP->replicatedRefs, | |
574 | accessinfP->numReplicasAccessed, accessinfP->maxReplicasPerRef, | |
575 | accessinfP->refFirstReplicaOK); | |
576 | ||
577 | /* There really isn't any authorship info | |
578 | * authorP = &(fullP->author); */ | |
579 | ||
580 | } /*PrintFullPerfInfo */ | |
581 | ||
582 | ||
583 | /*------------------------------------------------------------------------ | |
584 | * CM_Handler | |
585 | * | |
586 | * Description: | |
587 | * Handler routine passed to the xstat_cm module. This handler is | |
588 | * called immediately after a poll of one of the Cache Managers has | |
589 | * taken place. All it needs to know is exported by the xstat_cm | |
590 | * module, namely the data structure where the probe results are | |
591 | * stored. | |
592 | * | |
593 | * Arguments: | |
594 | * None. | |
595 | * | |
596 | * Returns: | |
597 | * 0 on success, | |
598 | * -1 otherwise. | |
599 | * | |
600 | * Environment: | |
601 | * See above. All we do now is print out what we got. | |
602 | * | |
603 | * Side Effects: | |
604 | * As advertised. | |
605 | *------------------------------------------------------------------------*/ | |
606 | ||
607 | int | |
608 | CM_Handler(void) | |
609 | { /*CM_Handler */ | |
610 | ||
611 | static char rn[] = "CM_Handler"; /*Routine name */ | |
612 | ||
613 | printf("\n-----------------------------------------------------------\n"); | |
614 | ||
615 | /* | |
616 | * If the probe failed, there isn't much we can do except gripe. | |
617 | */ | |
618 | if (xstat_cm_Results.probeOK) { | |
619 | printf("%s: Probe %u, collection %d to CM on '%s' failed, code=%d\n", | |
620 | rn, xstat_cm_Results.probeNum, | |
621 | xstat_cm_Results.collectionNumber, | |
622 | xstat_cm_Results.connP->hostName, xstat_cm_Results.probeOK); | |
623 | return (0); | |
624 | } | |
625 | ||
626 | if (debugging_on) { | |
627 | int i; | |
628 | int numInt32s = xstat_cm_Results.data.AFSCB_CollData_len; | |
629 | afs_int32 *entry = xstat_cm_Results.data.AFSCB_CollData_val; | |
630 | ||
631 | printf("debug: got collection number %d\n", xstat_cm_Results.collectionNumber); | |
632 | printf("debug: collection data length is %d\n", numInt32s); | |
633 | for (i = 0; i < numInt32s; i++) { | |
634 | printf("debug: entry %d %u\n", i, entry[i]); | |
635 | } | |
636 | printf("\n"); | |
637 | } | |
638 | ||
639 | switch (xstat_cm_Results.collectionNumber) { | |
640 | case AFSCB_XSTATSCOLL_CALL_INFO: | |
641 | print_cmCallStats(); | |
642 | break; | |
643 | ||
644 | case AFSCB_XSTATSCOLL_PERF_INFO: | |
645 | /* we will do nothing here */ | |
646 | /* PrintPerfInfo(); */ | |
647 | break; | |
648 | ||
649 | case AFSCB_XSTATSCOLL_FULL_PERF_INFO: | |
650 | PrintFullPerfInfo(); | |
651 | break; | |
652 | ||
653 | default: | |
654 | printf("** Unknown collection: %d\n", | |
655 | xstat_cm_Results.collectionNumber); | |
656 | } | |
657 | ||
658 | /* | |
659 | * Return the happy news. | |
660 | */ | |
661 | return (0); | |
662 | ||
663 | } /*CM_Handler */ | |
664 | ||
665 | ||
666 | /*------------------------------------------------------------------------ | |
667 | * CountListItems | |
668 | * | |
669 | * Description: | |
670 | * Given a pointer to the list of Cache Managers we'll be polling | |
671 | * (or, in fact, any list at all), compute the length of the list. | |
672 | * | |
673 | * Arguments: | |
674 | * struct cmd_item *a_firstItem : Ptr to first item in list. | |
675 | * | |
676 | * Returns: | |
677 | * Length of the above list. | |
678 | * | |
679 | * Environment: | |
680 | * Nothing interesting. | |
681 | * | |
682 | * Side Effects: | |
683 | * As advertised. | |
684 | *------------------------------------------------------------------------*/ | |
685 | ||
686 | static int | |
687 | CountListItems(struct cmd_item *a_firstItem) | |
688 | { /*CountListItems */ | |
689 | ||
690 | int list_len; /*List length */ | |
691 | struct cmd_item *curr_item; /*Ptr to current item */ | |
692 | ||
693 | list_len = 0; | |
694 | curr_item = a_firstItem; | |
695 | ||
696 | /* | |
697 | * Count 'em up. | |
698 | */ | |
699 | while (curr_item) { | |
700 | list_len++; | |
701 | curr_item = curr_item->next; | |
702 | } | |
703 | ||
704 | /* | |
705 | * Return our tally. | |
706 | */ | |
707 | return (list_len); | |
708 | ||
709 | } /*CountListItems */ | |
710 | ||
711 | ||
712 | /*------------------------------------------------------------------------ | |
713 | * RunTheTest | |
714 | * | |
715 | * Description: | |
716 | * Routine called by the command line interpreter to execute the | |
717 | * meat of the program. We count the number of Cache Managers | |
718 | * to watch, allocate enough space to remember all the connection | |
719 | * info for them, then go for it. | |
720 | * | |
721 | * | |
722 | * Arguments: | |
723 | * a_s : Ptr to the command line syntax descriptor. | |
724 | * | |
725 | * Returns: | |
726 | * 0, but may exit the whole program on an error! | |
727 | * | |
728 | * Environment: | |
729 | * Nothing interesting. | |
730 | * | |
731 | * Side Effects: | |
732 | * As advertised. | |
733 | *------------------------------------------------------------------------*/ | |
734 | ||
735 | int | |
736 | RunTheTest(struct cmd_syndesc *a_s, void *arock) | |
737 | { /*RunTheTest */ | |
738 | ||
739 | static char rn[] = "RunTheTest"; /*Routine name */ | |
740 | int code; /*Return code */ | |
741 | int numCMs; /*# Cache Managers to monitor */ | |
742 | int numCollIDs; /*# collections to fetch */ | |
743 | int currCM; /*Loop index */ | |
744 | int currCollIDIdx; /*Index of current collection ID */ | |
745 | afs_int32 *collIDP; /*Ptr to array of collection IDs */ | |
746 | afs_int32 *currCollIDP; /*Ptr to current collection ID */ | |
747 | struct cmd_item *curr_item; /*Current CM cmd line record */ | |
748 | struct sockaddr_in *CMSktArray; /*Cache Manager socket array */ | |
749 | struct hostent *he; /*Host entry */ | |
750 | struct timeval tv; /*Time structure */ | |
751 | int sleep_secs; /*Number of seconds to sleep */ | |
752 | int initFlags; /*Flags passed to the init fcn */ | |
753 | int waitCode; /*Result of LWP_WaitProcess() */ | |
754 | int freq; /*Frequency of polls */ | |
755 | int period; /*Time in minutes of data collection */ | |
756 | ||
757 | /* | |
758 | * Are we doing one-shot measurements? | |
759 | */ | |
760 | if (a_s->parms[P_ONESHOT].items != 0) | |
761 | one_shot = 1; | |
762 | ||
763 | /* | |
764 | * Are we doing debugging output? | |
765 | */ | |
766 | if (a_s->parms[P_DEBUG].items != 0) | |
767 | debugging_on = 1; | |
768 | ||
769 | /* | |
770 | * Pull out the number of Cache Managers to watch and the number of | |
771 | * collections to get. | |
772 | */ | |
773 | numCMs = CountListItems(a_s->parms[P_CM_NAMES].items); | |
774 | numCollIDs = CountListItems(a_s->parms[P_COLL_IDS].items); | |
775 | ||
776 | /* Get the polling frequency */ | |
777 | if (a_s->parms[P_FREQUENCY].items != 0) | |
778 | freq = atoi(a_s->parms[P_FREQUENCY].items->data); | |
779 | else | |
780 | freq = 30; /* default to 30 seconds */ | |
781 | ||
782 | /* Get the time duration to run the tests */ | |
783 | if (a_s->parms[P_PERIOD].items != 0) | |
784 | period = atoi(a_s->parms[P_PERIOD].items->data); | |
785 | else | |
786 | period = 10; /* default to 10 minutes */ | |
787 | ||
788 | /* | |
789 | * Allocate the socket array. | |
790 | */ | |
791 | if (debugging_on) | |
792 | printf("%s: Allocating socket array for %d Cache Manager(s)\n", rn, | |
793 | numCMs); | |
794 | if (numCMs > 0) { | |
795 | CMSktArray = calloc(numCMs, sizeof(struct sockaddr_in)); | |
796 | if (CMSktArray == NULL) { | |
797 | printf("%s: Can't allocate socket array for %d Cache Managers\n", | |
798 | rn, numCMs); | |
799 | exit(1); | |
800 | } | |
801 | } else { | |
802 | CMSktArray = NULL; | |
803 | } | |
804 | ||
805 | /* | |
806 | * Fill in the socket array for each of the Cache Managers listed. | |
807 | */ | |
808 | curr_item = a_s->parms[P_CM_NAMES].items; | |
809 | for (currCM = 0; currCM < numCMs; currCM++) { | |
810 | CMSktArray[currCM].sin_family = AF_INET; | |
811 | CMSktArray[currCM].sin_port = htons(7001); /* Cache Manager port */ | |
812 | he = hostutil_GetHostByName(curr_item->data); | |
813 | if (he == NULL) { | |
814 | fprintf(stderr, "[%s] Can't get host info for '%s'\n", rn, | |
815 | curr_item->data); | |
816 | exit(-1); | |
817 | } | |
818 | memcpy(&(CMSktArray[currCM].sin_addr.s_addr), he->h_addr, 4); | |
819 | ||
820 | /* | |
821 | * Move to the next CM name. | |
822 | */ | |
823 | curr_item = curr_item->next; | |
824 | ||
825 | } /*Get socket info for each Cache Manager */ | |
826 | ||
827 | /* | |
828 | * Create and fill up the array of desired collection IDs. | |
829 | */ | |
830 | if (debugging_on) | |
831 | printf("Allocating %d long(s) for coll ID\n", numCollIDs); | |
832 | ||
833 | if (numCollIDs > 0) | |
834 | collIDP = calloc(numCollIDs, sizeof(afs_int32)); | |
835 | else | |
836 | collIDP = NULL; | |
837 | ||
838 | currCollIDP = collIDP; | |
839 | curr_item = a_s->parms[P_COLL_IDS].items; | |
840 | for (currCollIDIdx = 0; currCollIDIdx < numCollIDs; currCollIDIdx++) { | |
841 | *currCollIDP = (afs_int32) (atoi(curr_item->data)); | |
842 | if (debugging_on) | |
843 | printf("CollID at index %d is %d\n", currCollIDIdx, *currCollIDP); | |
844 | curr_item = curr_item->next; | |
845 | currCollIDP++; | |
846 | }; | |
847 | ||
848 | /* | |
849 | * Crank up the Cache Manager prober, then sit back and have fun. | |
850 | */ | |
851 | printf("\nStarting up the xstat_cm service, "); | |
852 | initFlags = 0; | |
853 | if (debugging_on) { | |
854 | initFlags |= XSTAT_CM_INITFLAG_DEBUGGING; | |
855 | printf("debugging enabled, "); | |
856 | } else | |
857 | printf("no debugging, "); | |
858 | if (one_shot) { | |
859 | initFlags |= XSTAT_CM_INITFLAG_ONE_SHOT; | |
860 | printf("one-shot operation\n"); | |
861 | } else | |
862 | printf("continuous operation\n"); | |
863 | ||
864 | code = xstat_cm_Init(numCMs, /*Num CMs */ | |
865 | CMSktArray, /*File Server socket array */ | |
866 | freq, /*Probe every 30 seconds */ | |
867 | CM_Handler, /*Handler routine */ | |
868 | initFlags, /*Initialization flags */ | |
869 | numCollIDs, /*Number of collection IDs */ | |
870 | collIDP); /*Ptr to collection ID array */ | |
871 | if (code) { | |
872 | fprintf(stderr, "[%s] Error returned by xstat_cm_Init: %d\n", rn, | |
873 | code); | |
874 | xstat_cm_Cleanup(1); /*Get rid of malloc'ed structures */ | |
875 | exit(-1); | |
876 | } | |
877 | ||
878 | if (one_shot) { | |
879 | /* | |
880 | * One-shot operation; just wait for the collection to be done. | |
881 | */ | |
882 | if (debugging_on) | |
883 | printf("[%s] Calling LWP_WaitProcess() on event %" AFS_PTR_FMT | |
884 | "\n", rn, &terminationEvent); | |
885 | waitCode = LWP_WaitProcess(&terminationEvent); | |
886 | if (debugging_on) | |
887 | printf("[%s] Returned from LWP_WaitProcess()\n", rn); | |
888 | if (waitCode) { | |
889 | if (debugging_on) | |
890 | fprintf(stderr, | |
891 | "[%s] Error %d encountered by LWP_WaitProcess()\n", | |
892 | rn, waitCode); | |
893 | } | |
894 | } else { | |
895 | /* | |
896 | * Continuous operation. | |
897 | */ | |
898 | sleep_secs = 60 * period; /*length of data collection */ | |
899 | printf | |
900 | ("xstat_cm service started, main thread sleeping for %d secs.\n", | |
901 | sleep_secs); | |
902 | ||
903 | /* | |
904 | * Let's just fall asleep for a while, then we'll clean up. | |
905 | */ | |
906 | tv.tv_sec = sleep_secs; | |
907 | tv.tv_usec = 0; | |
908 | code = IOMGR_Select(0, /*Num fds */ | |
909 | 0, /*Descriptors ready for reading */ | |
910 | 0, /*Descriptors ready for writing */ | |
911 | 0, /*Descriptors with exceptional conditions */ | |
912 | &tv); /*Timeout structure */ | |
913 | if (code) { | |
914 | fprintf(stderr, | |
915 | "[%s] IOMGR_Select() returned non-zero value: %d\n", rn, | |
916 | code); | |
917 | } | |
918 | } | |
919 | ||
920 | /* | |
921 | * We're all done. Clean up, put the last nail in Rx, then | |
922 | * exit happily. | |
923 | */ | |
924 | if (debugging_on) | |
925 | printf("\nYawn, main thread just woke up. Cleaning things out...\n"); | |
926 | xstat_cm_Cleanup(1); /*Get rid of malloc'ed data */ | |
927 | rx_Finalize(); | |
928 | return (0); | |
929 | ||
930 | } /*RunTheTest */ | |
931 | ||
932 | ||
933 | #include "AFS_component_version_number.c" | |
934 | int | |
935 | main(int argc, char **argv) | |
936 | { /*Main routine */ | |
937 | ||
938 | static char rn[] = "xstat_cm_test"; /*Routine name */ | |
939 | afs_int32 code; /*Return code */ | |
940 | struct cmd_syndesc *ts; /*Ptr to cmd line syntax desc */ | |
941 | ||
942 | /* | |
943 | * Set up the commands we understand. | |
944 | */ | |
945 | ts = cmd_CreateSyntax("initcmd", RunTheTest, NULL, 0, "initialize the program"); | |
946 | cmd_AddParm(ts, "-cmname", CMD_LIST, CMD_REQUIRED, | |
947 | "Cache Manager name(s) to monitor"); | |
948 | cmd_AddParm(ts, "-collID", CMD_LIST, CMD_REQUIRED, | |
949 | "Collection(s) to fetch"); | |
950 | cmd_AddParm(ts, "-onceonly", CMD_FLAG, CMD_OPTIONAL, | |
951 | "Collect results exactly once, then quit"); | |
952 | cmd_AddParm(ts, "-frequency", CMD_SINGLE, CMD_OPTIONAL, | |
953 | "poll frequency, in seconds"); | |
954 | cmd_AddParm(ts, "-period", CMD_SINGLE, CMD_OPTIONAL, | |
955 | "data collection time, in minutes"); | |
956 | cmd_AddParm(ts, "-debug", CMD_FLAG, CMD_OPTIONAL, | |
957 | "turn on debugging output"); | |
958 | ||
959 | /* | |
960 | * Parse command-line switches & execute the test, then get the | |
961 | * heck out of here. | |
962 | */ | |
963 | code = cmd_Dispatch(argc, argv); | |
964 | if (code) { | |
965 | fprintf(stderr, "[%s] Call to cmd_Dispatch() failed; code is %d\n", | |
966 | rn, code); | |
967 | } | |
968 | ||
969 | exit(code); | |
970 | ||
971 | } /*Main routine */ |