/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/*
 * Description:
 *	Test of the xstat_fs module.
 *
 *------------------------------------------------------------------------*/

#include <afsconfig.h>
#include <afs/param.h>

#include <roken.h>

#include "xstat_fs.h"		/* Interface for the xstat_fs module */
#include <afs/cmd.h>		/* Command line interpreter */
#include <afs/afsutil.h>

/*
 * Command line parameter indices.
 *	P_FS_NAMES  : List of FileServer names.
 *	P_COLL_IDS  : List of collection IDs to pick up.
 *	P_ONESHOT   : Are we gathering exactly one round of data?
 *	P_FREQUENCY : Polling frequency, in seconds.
 *	P_PERIOD    : Length of the data collection, in minutes.
 *	P_DEBUG     : Enable debugging output?
 */
#define P_FS_NAMES	0
#define P_COLL_IDS	1
#define P_ONESHOT	2
#define P_FREQUENCY	3
#define P_PERIOD	4
#define P_DEBUG		5

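/*
 * Example invocation (a minimal sketch; the host name is illustrative, and
 * it assumes cmd_Dispatch() applies the single "initcmd" syntax defined in
 * main() directly to the arguments):
 *
 *	xstat_fs_test -fsname fs1.example.com -collID 2 -frequency 10 \
 *		-period 5 -debug
 *
 * Each -collID value names one collection to fetch; the values understood
 * are the AFS_XSTATSCOLL_* constants dispatched on in FS_Handler() below.
 */
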
/*
 * Private globals.
 */
static int debugging_on = 0;	/* Are we debugging? */
static int one_shot = 0;	/* Single round of data collection? */

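/*
 * Human-readable names for the RPC op timing slots printed by
 * PrintOpTiming(); the order is assumed to match the fileserver's
 * FS_STATS_NUM_RPC_OPS timer indices.
 */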
static char *opNames[] = {
    "FetchData",
    "FetchACL",
    "FetchStatus",
    "StoreData",
    "StoreACL",
    "StoreStatus",
    "RemoveFile",
    "CreateFile",
    "Rename",
    "Symlink",
    "Link",
    "MakeDir",
    "RemoveDir",
    "SetLock",
    "ExtendLock",
    "ReleaseLock",
    "GetStatistics",
    "GiveUpCallbacks",
    "GetVolumeInfo",
    "GetVolumeStatus",
    "SetVolumeStatus",
    "GetRootVolume",
    "CheckToken",
    "GetTime",
    "NGetVolumeInfo",
    "BulkStatus",
    "XStatsVersion",
    "GetXStats"
};

static char *xferOpNames[] = {
    "FetchData",
    "StoreData"
};


/*------------------------------------------------------------------------
 * PrintCallInfo
 *
 * Description:
 *	Print out the AFS_XSTATSCOLL_CALL_INFO collection we just
 *	received.
 *
 * Arguments:
 *	None.
 *
 * Returns:
 *	Nothing.
 *
 * Environment:
 *	All the info we need is nestled into xstat_fs_Results.
 *
 * Side Effects:
 *	As advertised.
 *------------------------------------------------------------------------*/

void
PrintCallInfo(void)
{				/* PrintCallInfo */
    int i;			/* Loop variable */
    int numInt32s;		/* # of int32 words returned */
    afs_int32 *currInt32;	/* Ptr to current afs_int32 value */
    char *printableTime;	/* Ptr to printable time string */
    time_t probeTime = xstat_fs_Results.probeTime;

    /*
     * Just print out the results of the particular probe.
     */
    numInt32s = xstat_fs_Results.data.AFS_CollData_len;
    currInt32 = (afs_int32 *) (xstat_fs_Results.data.AFS_CollData_val);
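    /* ctime() returns a string ending in '\n'; trim it so the timestamp
     * can be embedded in the header line printed below. */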
    printableTime = ctime(&probeTime);
    printableTime[strlen(printableTime) - 1] = '\0';

    printf("AFS_XSTATSCOLL_CALL_INFO (coll %d) for FS %s\n[Probe %u, %s]\n\n",
	   xstat_fs_Results.collectionNumber,
	   xstat_fs_Results.connP->hostName, xstat_fs_Results.probeNum,
	   printableTime);

    if (debugging_on)
	printf("\n[%u entries returned at %" AFS_PTR_FMT "]\n\n", numInt32s,
	       currInt32);

    for (i = 0; i < numInt32s; i++)
	printf("%u ", *currInt32++);
    printf("\n");

}				/* PrintCallInfo */


/*------------------------------------------------------------------------
 * PrintOverallPerfInfo
 *
 * Description:
 *	Print out overall performance numbers.
 *
 * Arguments:
 *	a_ovP : Ptr to the overall performance numbers.
 *
 * Returns:
 *	Nothing.
 *
 * Environment:
 *	Nothing interesting.
 *
 * Side Effects:
 *	As advertised.
 *------------------------------------------------------------------------*/

void
PrintOverallPerfInfo(struct afs_PerfStats *a_ovP)
{
    printf("\t%10u numPerfCalls\n\n", a_ovP->numPerfCalls);

    /*
     * Vnode cache section.
     */
    printf("\t%10u vcache_L_Entries\n", a_ovP->vcache_L_Entries);
    printf("\t%10u vcache_L_Allocs\n", a_ovP->vcache_L_Allocs);
    printf("\t%10u vcache_L_Gets\n", a_ovP->vcache_L_Gets);
    printf("\t%10u vcache_L_Reads\n", a_ovP->vcache_L_Reads);
    printf("\t%10u vcache_L_Writes\n\n", a_ovP->vcache_L_Writes);

    printf("\t%10u vcache_S_Entries\n", a_ovP->vcache_S_Entries);
    printf("\t%10u vcache_S_Allocs\n", a_ovP->vcache_S_Allocs);
    printf("\t%10u vcache_S_Gets\n", a_ovP->vcache_S_Gets);
    printf("\t%10u vcache_S_Reads\n", a_ovP->vcache_S_Reads);
    printf("\t%10u vcache_S_Writes\n\n", a_ovP->vcache_S_Writes);

    printf("\t%10u vcache_H_Entries\n", a_ovP->vcache_H_Entries);
    printf("\t%10u vcache_H_Gets\n", a_ovP->vcache_H_Gets);
    printf("\t%10u vcache_H_Replacements\n\n", a_ovP->vcache_H_Replacements);

    /*
     * Directory package section.
     */
    printf("\t%10u dir_Buffers\n", a_ovP->dir_Buffers);
    printf("\t%10u dir_Calls\n", a_ovP->dir_Calls);
    printf("\t%10u dir_IOs\n\n", a_ovP->dir_IOs);

    /*
     * Rx section.
     */
    printf("\t%10u rx_packetRequests\n", a_ovP->rx_packetRequests);
    printf("\t%10u rx_noPackets_RcvClass\n", a_ovP->rx_noPackets_RcvClass);
    printf("\t%10u rx_noPackets_SendClass\n", a_ovP->rx_noPackets_SendClass);
    printf("\t%10u rx_noPackets_SpecialClass\n",
	   a_ovP->rx_noPackets_SpecialClass);
    printf("\t%10u rx_socketGreedy\n", a_ovP->rx_socketGreedy);
    printf("\t%10u rx_bogusPacketOnRead\n", a_ovP->rx_bogusPacketOnRead);
    printf("\t%10u rx_bogusHost\n", a_ovP->rx_bogusHost);
    printf("\t%10u rx_noPacketOnRead\n", a_ovP->rx_noPacketOnRead);
    printf("\t%10u rx_noPacketBuffersOnRead\n",
	   a_ovP->rx_noPacketBuffersOnRead);
    printf("\t%10u rx_selects\n", a_ovP->rx_selects);
    printf("\t%10u rx_sendSelects\n", a_ovP->rx_sendSelects);
    printf("\t%10u rx_packetsRead_RcvClass\n",
	   a_ovP->rx_packetsRead_RcvClass);
    printf("\t%10u rx_packetsRead_SendClass\n",
	   a_ovP->rx_packetsRead_SendClass);
    printf("\t%10u rx_packetsRead_SpecialClass\n",
	   a_ovP->rx_packetsRead_SpecialClass);
    printf("\t%10u rx_dataPacketsRead\n", a_ovP->rx_dataPacketsRead);
    printf("\t%10u rx_ackPacketsRead\n", a_ovP->rx_ackPacketsRead);
    printf("\t%10u rx_dupPacketsRead\n", a_ovP->rx_dupPacketsRead);
    printf("\t%10u rx_spuriousPacketsRead\n", a_ovP->rx_spuriousPacketsRead);
    printf("\t%10u rx_packetsSent_RcvClass\n",
	   a_ovP->rx_packetsSent_RcvClass);
    printf("\t%10u rx_packetsSent_SendClass\n",
	   a_ovP->rx_packetsSent_SendClass);
    printf("\t%10u rx_packetsSent_SpecialClass\n",
	   a_ovP->rx_packetsSent_SpecialClass);
    printf("\t%10u rx_ackPacketsSent\n", a_ovP->rx_ackPacketsSent);
    printf("\t%10u rx_pingPacketsSent\n", a_ovP->rx_pingPacketsSent);
    printf("\t%10u rx_abortPacketsSent\n", a_ovP->rx_abortPacketsSent);
    printf("\t%10u rx_busyPacketsSent\n", a_ovP->rx_busyPacketsSent);
    printf("\t%10u rx_dataPacketsSent\n", a_ovP->rx_dataPacketsSent);
    printf("\t%10u rx_dataPacketsReSent\n", a_ovP->rx_dataPacketsReSent);
    printf("\t%10u rx_dataPacketsPushed\n", a_ovP->rx_dataPacketsPushed);
    printf("\t%10u rx_ignoreAckedPacket\n", a_ovP->rx_ignoreAckedPacket);
    printf("\t%10u rx_totalRtt_Sec\n", a_ovP->rx_totalRtt_Sec);
    printf("\t%10u rx_totalRtt_Usec\n", a_ovP->rx_totalRtt_Usec);
    printf("\t%10u rx_minRtt_Sec\n", a_ovP->rx_minRtt_Sec);
    printf("\t%10u rx_minRtt_Usec\n", a_ovP->rx_minRtt_Usec);
    printf("\t%10u rx_maxRtt_Sec\n", a_ovP->rx_maxRtt_Sec);
    printf("\t%10u rx_maxRtt_Usec\n", a_ovP->rx_maxRtt_Usec);
    printf("\t%10u rx_nRttSamples\n", a_ovP->rx_nRttSamples);
    printf("\t%10u rx_nServerConns\n", a_ovP->rx_nServerConns);
    printf("\t%10u rx_nClientConns\n", a_ovP->rx_nClientConns);
    printf("\t%10u rx_nPeerStructs\n", a_ovP->rx_nPeerStructs);
    printf("\t%10u rx_nCallStructs\n", a_ovP->rx_nCallStructs);
    printf("\t%10u rx_nFreeCallStructs\n", a_ovP->rx_nFreeCallStructs);
    printf("\t%10u rx_nBusies\n\n", a_ovP->rx_nBusies);

    printf("\t%10u fs_nBusies\n", a_ovP->fs_nBusies);
    printf("\t%10u fs_GetCapabilities\n\n", a_ovP->fs_nGetCaps);

    /*
     * Host module fields.
     */
    printf("\t%10u host_NumHostEntries\n", a_ovP->host_NumHostEntries);
    printf("\t%10u host_HostBlocks\n", a_ovP->host_HostBlocks);
    printf("\t%10u host_NonDeletedHosts\n", a_ovP->host_NonDeletedHosts);
    printf("\t%10u host_HostsInSameNetOrSubnet\n",
	   a_ovP->host_HostsInSameNetOrSubnet);
    printf("\t%10u host_HostsInDiffSubnet\n", a_ovP->host_HostsInDiffSubnet);
    printf("\t%10u host_HostsInDiffNetwork\n",
	   a_ovP->host_HostsInDiffNetwork);
    printf("\t%10u host_NumClients\n", a_ovP->host_NumClients);
    printf("\t%10u host_ClientBlocks\n\n", a_ovP->host_ClientBlocks);

    printf("\t%10u sysname_ID\n", a_ovP->sysname_ID);
}


/*------------------------------------------------------------------------
 * PrintOpTiming
 *
 * Description:
 *	Print out the contents of an RPC op timing structure.
 *
 * Arguments:
 *	a_opIdx   : Index of the AFS operation we're printing numbers for.
 *	a_opTimeP : Ptr to the op timing structure to print.
 *
 * Returns:
 *	Nothing.
 *
 * Environment:
 *	Nothing interesting.
 *
 * Side Effects:
 *	As advertised.
 *------------------------------------------------------------------------*/

void
PrintOpTiming(int a_opIdx, struct fs_stats_opTimingData *a_opTimeP)
{
    printf
	("%15s: %u ops (%u OK); sum=%lu.%06lu, sqr=%lu.%06lu, min=%lu.%06lu, max=%lu.%06lu\n",
	 opNames[a_opIdx], a_opTimeP->numOps, a_opTimeP->numSuccesses,
	 (long)a_opTimeP->sumTime.tv_sec, (long)a_opTimeP->sumTime.tv_usec,
	 (long)a_opTimeP->sqrTime.tv_sec, (long)a_opTimeP->sqrTime.tv_usec,
	 (long)a_opTimeP->minTime.tv_sec, (long)a_opTimeP->minTime.tv_usec,
	 (long)a_opTimeP->maxTime.tv_sec, (long)a_opTimeP->maxTime.tv_usec);
}


/*------------------------------------------------------------------------
 * PrintXferTiming
 *
 * Description:
 *	Print out the contents of a data transfer structure.
 *
 * Arguments:
 *	a_opIdx : Index of the AFS operation we're printing numbers for.
 *	a_xferP : Ptr to the data transfer structure to print.
 *
 * Returns:
 *	Nothing.
 *
 * Environment:
 *	Nothing interesting.
 *
 * Side Effects:
 *	As advertised.
 *------------------------------------------------------------------------*/

void
PrintXferTiming(int a_opIdx, struct fs_stats_xferData *a_xferP)
{
    printf
	("%s: %u xfers (%u OK), time sum=%lu.%06lu, sqr=%lu.%06lu, min=%lu.%06lu, max=%lu.%06lu\n",
	 xferOpNames[a_opIdx], a_xferP->numXfers, a_xferP->numSuccesses,
	 (long)a_xferP->sumTime.tv_sec, (long)a_xferP->sumTime.tv_usec,
	 (long)a_xferP->sqrTime.tv_sec, (long)a_xferP->sqrTime.tv_usec,
	 (long)a_xferP->minTime.tv_sec, (long)a_xferP->minTime.tv_usec,
	 (long)a_xferP->maxTime.tv_sec, (long)a_xferP->maxTime.tv_usec);
    printf("\t[bytes: sum=%u, min=%u, max=%u]\n", a_xferP->sumBytes,
	   a_xferP->minBytes, a_xferP->maxBytes);
    printf
	("\t[buckets: 0: %u, 1: %u, 2: %u, 3: %u, 4: %u, 5: %u, 6: %u, 7: %u, 8: %u]\n",
	 a_xferP->count[0], a_xferP->count[1], a_xferP->count[2],
	 a_xferP->count[3], a_xferP->count[4], a_xferP->count[5],
	 a_xferP->count[6], a_xferP->count[7], a_xferP->count[8]);
}


/*------------------------------------------------------------------------
 * PrintDetailedPerfInfo
 *
 * Description:
 *	Print out a set of detailed performance numbers.
 *
 * Arguments:
 *	a_detP : Ptr to detailed perf numbers to print.
 *
 * Returns:
 *	Nothing.
 *
 * Environment:
 *	Nothing interesting.
 *
 * Side Effects:
 *	As advertised.
 *------------------------------------------------------------------------*/

void
PrintDetailedPerfInfo(struct fs_stats_DetailedStats *a_detP)
{
    int currIdx;		/* Loop variable */

    printf("\t%10ld epoch\n", (long)a_detP->epoch.tv_sec);

    for (currIdx = 0; currIdx < FS_STATS_NUM_RPC_OPS; currIdx++)
	PrintOpTiming(currIdx, &(a_detP->rpcOpTimes[currIdx]));

    for (currIdx = 0; currIdx < FS_STATS_NUM_XFER_OPS; currIdx++)
	PrintXferTiming(currIdx, &(a_detP->xferOpTimes[currIdx]));
}


/*------------------------------------------------------------------------
 * PrintFullPerfInfo
 *
 * Description:
 *	Print out the AFS_XSTATSCOLL_FULL_PERF_INFO collection we just
 *	received.
 *
 * Arguments:
 *	None.
 *
 * Returns:
 *	Nothing.
 *
 * Environment:
 *	All the info we need is nestled into xstat_fs_Results.
 *
 * Side Effects:
 *	As advertised.
 *------------------------------------------------------------------------*/

void
PrintFullPerfInfo(void)
{
    int code;
    struct fs_stats_FullPerfStats *fullPerfP;	/* Ptr to full perf stats */
    struct fs_stats_FullPerfStats buffer;	/* Used to decode the stats */
    char *printableTime;	/* Ptr to printable time string */
    time_t probeTime = xstat_fs_Results.probeTime;
    static afs_int32 fullPerfInt32s = (sizeof(struct fs_stats_FullPerfStats) >> 2);	/* Correct # of int32s to receive */
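    /* Note: the ">> 2" above converts the structure's size in bytes into a
     * count of 32-bit words, assuming the structure packs into a whole
     * number of afs_int32s; that count is what the collection delivers. */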

    printableTime = ctime(&probeTime);
    printableTime[strlen(printableTime) - 1] = '\0';
    printf
	("AFS_XSTATSCOLL_FULL_PERF_INFO (coll %d) for FS %s\n[Probe %u, %s]\n\n",
	 xstat_fs_Results.collectionNumber, xstat_fs_Results.connP->hostName,
	 xstat_fs_Results.probeNum, printableTime);

    code =
	xstat_fs_DecodeFullPerfStats(&fullPerfP,
				     xstat_fs_Results.data.AFS_CollData_val,
				     xstat_fs_Results.data.AFS_CollData_len,
				     &buffer);
    if (code) {
	afs_int32 numInt32s = xstat_fs_Results.data.AFS_CollData_len;	/* # of int32 words received */
	printf("** Data size mismatch in full performance collection!\n");
	printf("** Expecting %u, got %u\n", fullPerfInt32s, numInt32s);
    } else {
	PrintOverallPerfInfo(&(fullPerfP->overall));
	PrintDetailedPerfInfo(&(fullPerfP->det));
    }
}


/*------------------------------------------------------------------------
 * PrintPerfInfo
 *
 * Description:
 *	Print out the AFS_XSTATSCOLL_PERF_INFO collection we just
 *	received.
 *
 * Arguments:
 *	None.
 *
 * Returns:
 *	Nothing.
 *
 * Environment:
 *	All the info we need is nestled into xstat_fs_Results.
 *
 * Side Effects:
 *	As advertised.
 *------------------------------------------------------------------------*/

void
PrintPerfInfo(void)
{
    static afs_int32 perfInt32s = (sizeof(struct afs_PerfStats) >> 2);	/* Correct # of int32s to receive */
    afs_int32 numInt32s;	/* # of int32 words received */
    struct afs_PerfStats *perfP;	/* Ptr to performance stats */
    char *printableTime;	/* Ptr to printable time string */
    time_t probeTime = xstat_fs_Results.probeTime;

    numInt32s = xstat_fs_Results.data.AFS_CollData_len;
    if (numInt32s != perfInt32s) {
	printf("** Data size mismatch in performance collection!\n");
	printf("** Expecting %u, got %u\n", perfInt32s, numInt32s);
	return;
    }

    printableTime = ctime(&probeTime);
    printableTime[strlen(printableTime) - 1] = '\0';
    perfP = (struct afs_PerfStats *)
	(xstat_fs_Results.data.AFS_CollData_val);

    printf("AFS_XSTATSCOLL_PERF_INFO (coll %d) for FS %s\n[Probe %u, %s]\n\n",
	   xstat_fs_Results.collectionNumber,
	   xstat_fs_Results.connP->hostName, xstat_fs_Results.probeNum,
	   printableTime);

    PrintOverallPerfInfo(perfP);
}

static char *CbCounterStrings[] = {
    "DeleteFiles",
    "DeleteCallBacks",
    "BreakCallBacks",
    "AddCallBack",
    "GotSomeSpaces",
    "DeleteAllCallBacks",
    "nFEs", "nCBs", "nblks",
    "CBsTimedOut",
    "nbreakers",
    "GSS1", "GSS2", "GSS3", "GSS4", "GSS5"
};


void
PrintCbCounters(void)
{
    int numInt32s = sizeof(CbCounterStrings) / sizeof(char *);
    int i;
    afs_int32 *val = xstat_fs_Results.data.AFS_CollData_val;

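    /* Print no more entries than the server actually returned, so a short
     * reply cannot push us past the end of the data array. */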
    if (numInt32s > xstat_fs_Results.data.AFS_CollData_len)
	numInt32s = xstat_fs_Results.data.AFS_CollData_len;

    for (i = 0; i < numInt32s; i++) {
	printf("\t%10u %s\n", val[i], CbCounterStrings[i]);
    }
}


/*------------------------------------------------------------------------
 * FS_Handler
 *
 * Description:
 *	Handler routine passed to the xstat_fs module.  This handler is
 *	called immediately after a poll of one of the File Servers has
 *	taken place.  All it needs to know is exported by the xstat_fs
 *	module, namely the data structure where the probe results are
 *	stored.
 *
 * Arguments:
 *	None.
 *
 * Returns:
 *	0 on success,
 *	-1 otherwise.
 *
 * Environment:
 *	See above.  All we do now is print out what we got.
 *
 * Side Effects:
 *	As advertised.
 *------------------------------------------------------------------------*/

int
FS_Handler(void)
{
    static char rn[] = "FS_Handler";	/* Routine name */

    printf
	("\n------------------------------------------------------------\n");

    /*
     * If the probe failed, there isn't much we can do except gripe.
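     * (Despite its name, xstat_fs_Results.probeOK is zero on success and
     * holds the error code on failure, hence the sense of this test.)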
     */
    if (xstat_fs_Results.probeOK) {
	printf("%s: Probe %u to File Server '%s' failed, code=%d\n", rn,
	       xstat_fs_Results.probeNum, xstat_fs_Results.connP->hostName,
	       xstat_fs_Results.probeOK);
	return (0);
    }

    if (debugging_on) {
	int i;
	int numInt32s = xstat_fs_Results.data.AFS_CollData_len;
	afs_int32 *entry = xstat_fs_Results.data.AFS_CollData_val;

	printf("debug: got collection number %d\n",
	       xstat_fs_Results.collectionNumber);
	printf("debug: collection data length is %d\n", numInt32s);
	for (i = 0; i < numInt32s; i++) {
	    printf("debug: entry %d %u\n", i, entry[i]);
	}
	printf("\n");
    }

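    /*
     * Dispatch on the collection type; the AFS_XSTATSCOLL_* constants come
     * from the xstat interface headers.
     */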
    switch (xstat_fs_Results.collectionNumber) {
    case AFS_XSTATSCOLL_CALL_INFO:
	PrintCallInfo();
	break;

    case AFS_XSTATSCOLL_PERF_INFO:
	PrintPerfInfo();
	break;

    case AFS_XSTATSCOLL_FULL_PERF_INFO:
	PrintFullPerfInfo();
	break;

    case AFS_XSTATSCOLL_CBSTATS:
	PrintCbCounters();
	break;

    default:
	printf("** Unknown collection: %d\n",
	       xstat_fs_Results.collectionNumber);
    }

    /*
     * Return the happy news.
     */
    return (0);
}


/*------------------------------------------------------------------------
 * CountListItems
 *
 * Description:
 *	Given a pointer to the list of File Servers we'll be polling
 *	(or, in fact, any list at all), compute the length of the list.
 *
 * Arguments:
 *	struct cmd_item *a_firstItem : Ptr to first item in list.
 *
 * Returns:
 *	Length of the above list.
 *
 * Environment:
 *	Nothing interesting.
 *
 * Side Effects:
 *	As advertised.
 *------------------------------------------------------------------------*/

static int
CountListItems(struct cmd_item *a_firstItem)
{
    int list_len;		/* List length */
    struct cmd_item *curr_item;	/* Ptr to current item */

    list_len = 0;
    curr_item = a_firstItem;

    /*
     * Count 'em up.
     */
    while (curr_item) {
	list_len++;
	curr_item = curr_item->next;
    }

    /*
     * Return our tally.
     */
    return (list_len);
}


/*------------------------------------------------------------------------
 * RunTheTest
 *
 * Description:
 *	Routine called by the command line interpreter to execute the
 *	meat of the program.  We count the number of File Servers
 *	to watch, allocate enough space to remember all the connection
 *	info for them, then go for it.
 *
 * Arguments:
 *	a_s : Ptr to the command line syntax descriptor.
 *
 * Returns:
 *	0, but may exit the whole program on an error!
 *
 * Environment:
 *	Nothing interesting.
 *
 * Side Effects:
 *	As advertised.
 *------------------------------------------------------------------------*/

int
RunTheTest(struct cmd_syndesc *a_s, void *dummy)
{
    static char rn[] = "RunTheTest";	/* Routine name */
    int code;			/* Return code */
    int numFSs;			/* # of File Servers to monitor */
    int numCollIDs;		/* # of collections to fetch */
    int currFS;			/* Loop index */
    int currCollIDIdx;		/* Index of current collection ID */
    afs_int32 *collIDP;		/* Ptr to array of collection IDs */
    afs_int32 *currCollIDP;	/* Ptr to current collection ID */
    struct cmd_item *curr_item;	/* Current FS cmd line record */
    struct sockaddr_in FSSktArray[20];	/* File Server socket array - FIX! */
    struct hostent *he;		/* Host entry */
    struct timeval tv;		/* Time structure */
    int sleep_secs;		/* Number of seconds to sleep */
    int initFlags;		/* Flags passed to the init fcn */
    int waitCode;		/* Result of LWP_WaitProcess() */
    int freq;			/* Frequency of polls */
    int period;			/* Time in minutes of data collection */

    /*
     * Are we doing one-shot measurements?
     */
    if (a_s->parms[P_ONESHOT].items != 0)
	one_shot = 1;

    /*
     * Are we doing debugging output?
     */
    if (a_s->parms[P_DEBUG].items != 0)
	debugging_on = 1;

    /*
     * Pull out the number of File Servers to watch and the number of
     * collections to get.
     */
    numFSs = CountListItems(a_s->parms[P_FS_NAMES].items);
    numCollIDs = CountListItems(a_s->parms[P_COLL_IDS].items);

    /*
     * FSSktArray is a fixed-size array (see the FIX! note above), so
     * refuse server lists that would overflow it.
     */
    if (numFSs > (int)(sizeof(FSSktArray) / sizeof(FSSktArray[0]))) {
	fprintf(stderr, "[%s] Too many File Servers listed; max is %d\n",
		rn, (int)(sizeof(FSSktArray) / sizeof(FSSktArray[0])));
	exit(-1);
    }

    /* Get the polling frequency */
    if (a_s->parms[P_FREQUENCY].items != 0)
	freq = atoi(a_s->parms[P_FREQUENCY].items->data);
    else
	freq = 30;		/* default to 30 seconds */

    /* Get the length of time over which to run the test */
    if (a_s->parms[P_PERIOD].items != 0)
	period = atoi(a_s->parms[P_PERIOD].items->data);
    else
	period = 10;		/* default to 10 minutes */

    /*
     * Fill in the socket array for each of the File Servers listed.
     */
    curr_item = a_s->parms[P_FS_NAMES].items;
    for (currFS = 0; currFS < numFSs; currFS++) {
	FSSktArray[currFS].sin_family = AF_INET;
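	/* 7000 is the well-known AFS fileserver port (afs3-fileserver);
	 * note that it is hard-coded here rather than looked up. */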
	FSSktArray[currFS].sin_port = htons(7000);	/* FileServer port */
	he = hostutil_GetHostByName(curr_item->data);
	if (he == NULL) {
	    fprintf(stderr, "[%s] Can't get host info for '%s'\n", rn,
		    curr_item->data);
	    exit(-1);
	}
	memcpy(&(FSSktArray[currFS].sin_addr.s_addr), he->h_addr, 4);

	/*
	 * Move to the next File Server name.
	 */
	curr_item = curr_item->next;

    }				/* Get socket info for each File Server */

    /*
     * Create and fill up the array of desired collection IDs.
     */
    if (debugging_on)
	printf("Allocating %d afs_int32(s) for the collection IDs\n",
	       numCollIDs);

    if (numCollIDs > 0) {
	collIDP = calloc(numCollIDs, sizeof(afs_int32));
	if (collIDP == NULL) {
	    fprintf(stderr, "[%s] Can't allocate collection ID array\n", rn);
	    exit(-1);
	}
    } else
	collIDP = NULL;

    currCollIDP = collIDP;
    curr_item = a_s->parms[P_COLL_IDS].items;
    for (currCollIDIdx = 0; currCollIDIdx < numCollIDs; currCollIDIdx++) {
	*currCollIDP = (afs_int32) (atoi(curr_item->data));
	if (debugging_on)
	    printf("CollID at index %d is %d\n", currCollIDIdx, *currCollIDP);
	curr_item = curr_item->next;
	currCollIDP++;
    }

    /*
     * Crank up the File Server prober, then sit back and have fun.
     */
    printf("\nStarting up the xstat_fs service, ");
    initFlags = 0;
    if (debugging_on) {
	initFlags |= XSTAT_FS_INITFLAG_DEBUGGING;
	printf("debugging enabled, ");
    } else
	printf("no debugging, ");
    if (one_shot) {
	initFlags |= XSTAT_FS_INITFLAG_ONE_SHOT;
	printf("one-shot operation\n");
    } else
	printf("continuous operation\n");

    code = xstat_fs_Init(numFSs,	/* Num servers */
			 FSSktArray,	/* File Server socket array */
			 freq,		/* Probe frequency */
			 FS_Handler,	/* Handler routine */
			 initFlags,	/* Initialization flags */
			 numCollIDs,	/* Number of collection IDs */
			 collIDP);	/* Ptr to collection ID array */
    if (code) {
	fprintf(stderr, "[%s] Error returned by xstat_fs_Init: %d\n", rn,
		code);
	xstat_fs_Cleanup(1);	/* Get rid of malloc'ed structures */
	exit(-1);
    }

    if (one_shot) {
	/*
	 * One-shot operation; just wait for the collection to be done.
	 */
	if (debugging_on)
	    printf("[%s] Calling LWP_WaitProcess() on event %" AFS_PTR_FMT
		   "\n", rn, &terminationEvent);
	waitCode = LWP_WaitProcess(&terminationEvent);
	if (debugging_on)
	    printf("[%s] Returned from LWP_WaitProcess()\n", rn);
	if (waitCode) {
	    if (debugging_on)
		fprintf(stderr,
			"[%s] Error %d encountered by LWP_WaitProcess()\n",
			rn, waitCode);
	}
    } else {
	/*
	 * Continuous operation.
	 */
	sleep_secs = 60 * period;	/* length of data collection */
	printf
	    ("xstat_fs service started, main thread sleeping for %d secs.\n",
	     sleep_secs);

	/*
	 * Let's just fall asleep for a while, then we'll clean up.
	 */
	tv.tv_sec = sleep_secs;
	tv.tv_usec = 0;
	code = IOMGR_Select(0,	/* Num fds */
			    0,	/* Descriptors ready for reading */
			    0,	/* Descriptors ready for writing */
			    0,	/* Descriptors with exceptional conditions */
			    &tv);	/* Timeout structure */
	if (code) {
	    fprintf(stderr,
		    "[%s] IOMGR_Select() returned non-zero value: %d\n", rn,
		    code);
	}
    }

    /*
     * We're all done.  Clean up, put the last nail in Rx, then
     * exit happily.
     */
    if (debugging_on)
	printf("\nYawn, main thread just woke up.  Cleaning things out...\n");

    xstat_fs_Cleanup(1);	/* Get rid of malloc'ed data */
    rx_Finalize();
    return (0);
}


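/*
 * AFS_component_version_number.c is generated at build time; including it
 * embeds the component's version string in this binary.
 */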
#include "AFS_component_version_number.c"

int
main(int argc, char **argv)
{
    static char rn[] = "xstat_fs_test";	/* Routine name */
    afs_int32 code;		/* Return code */
    struct cmd_syndesc *ts;	/* Ptr to cmd line syntax descriptor */

    /*
     * Set up the commands we understand.
     */
    ts = cmd_CreateSyntax("initcmd", RunTheTest, 0, 0,
			  "initialize the program");
    cmd_AddParm(ts, "-fsname", CMD_LIST, CMD_REQUIRED,
		"File Server name(s) to monitor");
    cmd_AddParm(ts, "-collID", CMD_LIST, CMD_REQUIRED,
		"Collection(s) to fetch");
    cmd_AddParm(ts, "-onceonly", CMD_FLAG, CMD_OPTIONAL,
		"Collect results exactly once, then quit");
    cmd_AddParm(ts, "-frequency", CMD_SINGLE, CMD_OPTIONAL,
		"poll frequency, in seconds");
    cmd_AddParm(ts, "-period", CMD_SINGLE, CMD_OPTIONAL,
		"data collection time, in minutes");
    cmd_AddParm(ts, "-debug", CMD_FLAG, CMD_OPTIONAL,
		"turn on debugging output");

    /*
     * Parse command-line switches & execute the test, then get the
     * heck out of here.
     */
    code = cmd_Dispatch(argc, argv);
    if (code) {
	fprintf(stderr, "[%s] Call to cmd_Dispatch() failed; code is %d\n",
		rn, code);
    }

    exit(code);
}