/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/* RX:  Globals for internal use, basically */

#ifndef AFS_RX_GLOBALS_H
#define AFS_RX_GLOBALS_H


#ifdef KERNEL
#include "rx/rx.h"
#else /* KERNEL */
# include "rx.h"
#endif /* KERNEL */

#ifndef GLOBALSINIT
#define GLOBALSINIT(x)
#define POSTAMBLE
#if defined(AFS_NT40_ENV)
#define RX_STATS_INTERLOCKED 1
#if defined(AFS_PTHREAD_ENV)
#define EXT __declspec(dllimport) extern
#else /* AFS_PTHREAD_ENV */
#define EXT extern
#endif /* AFS_PTHREAD_ENV */
#else /* AFS_NT40_ENV */
#define EXT extern
#endif /* AFS_NT40_ENV */
#endif /* !GLOBALSINIT */

/* Basic socket for client requests; other sockets (for receiving server
 * requests) are in the service structures */
EXT osi_socket rx_socket;

/* The array of installed services.  Null terminated. */
EXT struct rx_service *rx_services[RX_MAX_SERVICES + 1];
#ifdef RX_ENABLE_LOCKS
/* Protects nRequestsRunning as well as pool allocation variables. */
EXT afs_kmutex_t rx_serverPool_lock;
#endif /* RX_ENABLE_LOCKS */

/* Constant delay time before sending a hard ack if the receiver consumes
 * a packet while no delayed ack event is scheduled.  Ensures that the
 * sender is able to advance its window when the receiver consumes a packet
 * after the sender has exhausted its transmit window.
 */
EXT struct clock rx_hardAckDelay;

#if defined(RXDEBUG) || defined(AFS_NT40_ENV)
/* Variable to allow introduction of network unreliability; exported from libafsrpc */
EXT int rx_intentionallyDroppedPacketsPer100 GLOBALSINIT(0);    /* Dropped on Send */
EXT int rx_intentionallyDroppedOnReadPer100 GLOBALSINIT(0);     /* Dropped on Read */
#endif

/* extra packets to add to the quota */
EXT int rx_extraQuota GLOBALSINIT(0);
/* extra packets to alloc (2 * maxWindowSize by default) */
EXT int rx_extraPackets GLOBALSINIT(256);

EXT int rx_stackSize GLOBALSINIT(RX_DEFAULT_STACK_SIZE);

/* Time until an unresponsive connection is declared dead */
EXT int rx_connDeadTime GLOBALSINIT(12);

/* Set rx default connection dead time; set on both services and connections at creation time */
#ifdef AFS_NT40_ENV
void rx_SetRxDeadTime(int seconds);
#else
#define rx_SetRxDeadTime(seconds)   (rx_connDeadTime = (seconds))
#endif
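/*
 * Usage sketch (illustrative, not part of the original header): a client
 * that wants unresponsive connections declared dead after 50 seconds calls,
 * before creating any connections:
 *
 *     rx_SetRxDeadTime(50);
 *
 * On non-NT builds this simply assigns rx_connDeadTime.
 */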

/* Time until we toss an idle connection */
EXT int rx_idleConnectionTime GLOBALSINIT(700);
/* Time until we toss a peer structure, after all connections using it are gone */
EXT int rx_idlePeerTime GLOBALSINIT(60);

/* The file server is temporarily salvaging */
EXT int rx_tranquil GLOBALSINIT(0);

/* UDP rcv buffer size */
EXT int rx_UdpBufSize GLOBALSINIT(64 * 1024);
#ifdef AFS_NT40_ENV
int rx_GetMinUdpBufSize(void);
void rx_SetUdpBufSize(int x);
#else
#define rx_GetMinUdpBufSize()   (64*1024)
#define rx_SetUdpBufSize(x)     (((x)>rx_GetMinUdpBufSize()) ? (rx_UdpBufSize = (x)) : 0)
#endif
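/*
 * Behavior note (illustrative): rx_SetUdpBufSize never shrinks the buffer
 * below the 64 KB minimum returned by rx_GetMinUdpBufSize():
 *
 *     rx_SetUdpBufSize(256 * 1024);   accepted; rx_UdpBufSize becomes 256 KB
 *     rx_SetUdpBufSize(16 * 1024);    rejected; expands to 0, size unchanged
 */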
/*
 * Variables to control RX overload management.  When the number of calls
 * waiting for a thread exceeds the threshold, new calls are aborted
 * with the busy error.
 */
EXT int rx_BusyThreshold GLOBALSINIT(-1);       /* default is disabled */
EXT int rx_BusyError GLOBALSINIT(-1);
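/*
 * A minimal sketch of enabling overload management (both values are
 * hypothetical, not defaults): once more than 100 calls are queued waiting
 * for a server thread, abort each new call with error code -512.
 *
 *     rx_BusyThreshold = 100;
 *     rx_BusyError = -512;
 */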

/* These definitions should be in one place */
#ifdef AFS_SUN5_ENV
#define RX_CBUF_TIME    180     /* Check for packet deficit */
#define RX_REAP_TIME     90     /* Check for tossable connections every 90 seconds */
#else
#define RX_CBUF_TIME    120     /* Check for packet deficit */
#define RX_REAP_TIME     60     /* Check for tossable connections every 60 seconds */
#endif

#define RX_FAST_ACK_RATE 1      /* as of 3.4, ask for an ack every
                                 * other packet */

EXT int rx_minPeerTimeout GLOBALSINIT(20);      /* in milliseconds */
EXT int rx_minWindow GLOBALSINIT(1);
EXT int rx_maxWindow GLOBALSINIT(RX_MAXACKS);   /* must ack what we receive */
EXT int rx_initReceiveWindow GLOBALSINIT(16);   /* how much to accept */
EXT int rx_maxReceiveWindow GLOBALSINIT(32);    /* how much to accept */
EXT int rx_initSendWindow GLOBALSINIT(16);
EXT int rx_maxSendWindow GLOBALSINIT(32);
EXT int rx_nackThreshold GLOBALSINIT(3);        /* number of NACKs that trigger congestion recovery */
EXT int rx_nDgramThreshold GLOBALSINIT(4);      /* number of packets before increasing
                                                 * packets per datagram */
#define RX_MAX_FRAGS 4
EXT int rxi_nSendFrags GLOBALSINIT(RX_MAX_FRAGS);       /* max fragments in a datagram */
EXT int rxi_nRecvFrags GLOBALSINIT(RX_MAX_FRAGS);
EXT int rxi_OrphanFragSize GLOBALSINIT(512);

#define RX_MAX_DGRAM_PACKETS 6  /* max packets per jumbogram */

EXT int rxi_nDgramPackets GLOBALSINIT(RX_MAX_DGRAM_PACKETS);
/* allow n packets between soft acks */
EXT int rxi_SoftAckRate GLOBALSINIT(RX_FAST_ACK_RATE);
/* consume n packets before sending a hard ack; should be larger than the
 * soft ack rate, though that is not strictly necessary.  If it is smaller,
 * fast receivers will send a soft ack immediately followed by a hard ack. */
EXT int rxi_HardAckRate GLOBALSINIT(RX_FAST_ACK_RATE + 1);

EXT int rx_nPackets GLOBALSINIT(0);     /* preallocate packets with rx_extraPackets */

/*
 * pthreads thread-specific rx info support
 * the rx_ts_info_t struct is meant to support all kinds of
 * thread-specific rx data:
 *
 *  _FPQ member contains a thread-specific free packet queue
 */
#ifdef AFS_PTHREAD_ENV
EXT pthread_key_t rx_ts_info_key;
typedef struct rx_ts_info_t {
    struct {
        struct opr_queue queue;
        int len;                /* local queue length */
        int delta;              /* number of new packets alloc'd locally since last sync w/ global queue */

        /* FPQ stats */
        int checkin_ops;
        int checkin_xfer;
        int checkout_ops;
        int checkout_xfer;
        int gtol_ops;
        int gtol_xfer;
        int ltog_ops;
        int ltog_xfer;
        int lalloc_ops;
        int lalloc_xfer;
        int galloc_ops;
        int galloc_xfer;
    } _FPQ;
    struct rx_packet * local_special_packet;
} rx_ts_info_t;
EXT struct rx_ts_info_t * rx_ts_info_init(void);        /* init function for thread-specific data struct */
#define RX_TS_INFO_GET(ts_info_p) \
    do { \
        ts_info_p = (struct rx_ts_info_t*)pthread_getspecific(rx_ts_info_key); \
        if (ts_info_p == NULL) { \
            opr_Verify((ts_info_p = rx_ts_info_init()) != NULL); \
        } \
    } while(0)
#endif /* AFS_PTHREAD_ENV */
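/*
 * Usage sketch (illustrative): any rx worker thread fetches its
 * thread-specific state with RX_TS_INFO_GET, which lazily creates the
 * structure on first use:
 *
 *     struct rx_ts_info_t *ts_info;
 *     RX_TS_INFO_GET(ts_info);
 *     if (ts_info->_FPQ.len > 0) { ... packets are available locally ... }
 */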


/* List of free packets */
/* in pthreads rx, the free packet queue is now a two-tiered queueing system
 * in which the first tier is thread-specific, and the second tier is
 * a global free packet queue */
EXT struct opr_queue rx_freePacketQueue;
#ifdef RX_TRACK_PACKETS
#define RX_FPQ_MARK_FREE(p) \
    do { \
        if ((p)->flags & RX_PKTFLAG_FREE) \
            osi_Panic("rx packet already free\n"); \
        (p)->flags |= RX_PKTFLAG_FREE; \
        (p)->flags &= ~(RX_PKTFLAG_TQ|RX_PKTFLAG_IOVQ|RX_PKTFLAG_RQ|RX_PKTFLAG_CP); \
        (p)->length = 0; \
        (p)->niovecs = 0; \
    } while(0)
#define RX_FPQ_MARK_USED(p) \
    do { \
        if (!((p)->flags & RX_PKTFLAG_FREE)) \
            osi_Panic("rx packet not free\n"); \
        (p)->flags = 0;         /* clear RX_PKTFLAG_FREE, initialize the rest */ \
        (p)->header.flags = 0; \
    } while(0)
#else
#define RX_FPQ_MARK_FREE(p) \
    do { \
        (p)->length = 0; \
        (p)->niovecs = 0; \
    } while(0)
#define RX_FPQ_MARK_USED(p) \
    do { \
        (p)->flags = 0;         /* clear RX_PKTFLAG_FREE, initialize the rest */ \
        (p)->header.flags = 0; \
    } while(0)
#endif
#define RX_PACKET_IOV_INIT(p) \
    do { \
        (p)->wirevec[0].iov_base = (char *)((p)->wirehead); \
        (p)->wirevec[0].iov_len = RX_HEADER_SIZE; \
        (p)->wirevec[1].iov_base = (char *)((p)->localdata); \
        (p)->wirevec[1].iov_len = RX_FIRSTBUFFERSIZE; \
    } while(0)
#define RX_PACKET_IOV_FULLINIT(p) \
    do { \
        (p)->wirevec[0].iov_base = (char *)((p)->wirehead); \
        (p)->wirevec[0].iov_len = RX_HEADER_SIZE; \
        (p)->wirevec[1].iov_base = (char *)((p)->localdata); \
        (p)->wirevec[1].iov_len = RX_FIRSTBUFFERSIZE; \
        (p)->niovecs = 2; \
        (p)->length = RX_FIRSTBUFFERSIZE; \
    } while(0)

#ifdef RX_ENABLE_LOCKS
EXT afs_kmutex_t rx_freePktQ_lock;
#endif /* RX_ENABLE_LOCKS */

/*!
 * \brief Queue of allocated packets.
 *
 * This queue is used to keep track of the blocks of allocated packets.
 * This information is used when afs is being unmounted and the memory
 * used by those packets needs to be released.
 */
EXT struct opr_queue rx_mallocedPacketQueue;
#ifdef RX_ENABLE_LOCKS
EXT afs_kmutex_t rx_mallocedPktQ_lock;
#endif /* RX_ENABLE_LOCKS */

#if defined(AFS_PTHREAD_ENV) && !defined(KERNEL)
#define RX_ENABLE_TSFPQ
EXT int rx_TSFPQGlobSize GLOBALSINIT(3);        /* number of packets to transfer between global and local queues in one op */
EXT int rx_TSFPQLocalMax GLOBALSINIT(15);       /* max number of packets on local FPQ before returning a glob to the global pool */
EXT int rx_TSFPQMaxProcs GLOBALSINIT(0);        /* max number of threads expected */
#define RX_TS_FPQ_FLUSH_GLOBAL 1
#define RX_TS_FPQ_PULL_GLOBAL 1
#define RX_TS_FPQ_ALLOW_OVERCOMMIT 1
/*
 * Compute the localmax and globsize values from rx_TSFPQMaxProcs and rx_nPackets.
 * Arbitrarily set the local max so that all threads together consume 90% of
 * packets when all local queues are full, and arbitrarily set the transfer
 * glob size to 20% of the max local packet queue length.  Also enforce
 * minimum values of 15 and 3 respectively.  Given the algorithms, the number
 * of buffers allocated by each call to AllocPacketBufs() would increase
 * indefinitely without a cap on the transfer glob size; a cap of 64 is
 * selected because it produces an allocation of greater than three times
 * that amount, which is greater than half of ncalls * maxReceiveWindow.
 * Must be called under rx_packets_mutex.
 */
#define RX_TS_FPQ_COMPUTE_LIMITS \
    do { \
        int newmax, newglob; \
        newmax = (rx_nPackets * 9) / (10 * rx_TSFPQMaxProcs); \
        newmax = (newmax >= 15) ? newmax : 15; \
        newglob = newmax / 5; \
        newglob = (newglob >= 3) ? (newglob < 64 ? newglob : 64) : 3; \
        rx_TSFPQLocalMax = newmax; \
        rx_TSFPQGlobSize = newglob; \
    } while(0)
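/*
 * Worked example (illustrative numbers): with rx_nPackets = 200 and
 * rx_TSFPQMaxProcs = 4, newmax = (200 * 9) / (10 * 4) = 45, which clears
 * the floor of 15, so rx_TSFPQLocalMax = 45; newglob = 45 / 5 = 9, which
 * lies inside [3, 64], so rx_TSFPQGlobSize = 9.
 */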
/* record the number of packets allocated by this thread
 * and stored in the thread local queue */
#define RX_TS_FPQ_LOCAL_ALLOC(rx_ts_info_p,num_alloc) \
    do { \
        (rx_ts_info_p)->_FPQ.lalloc_ops++; \
        (rx_ts_info_p)->_FPQ.lalloc_xfer += num_alloc; \
    } while (0)
/* record the number of packets allocated by this thread
 * and stored in the global queue */
#define RX_TS_FPQ_GLOBAL_ALLOC(rx_ts_info_p,num_alloc) \
    do { \
        (rx_ts_info_p)->_FPQ.galloc_ops++; \
        (rx_ts_info_p)->_FPQ.galloc_xfer += num_alloc; \
    } while (0)
/* move packets from the local (thread-specific) to the global free packet
 * queue.  rx_freePktQ_lock must be held.  The default is to reduce the
 * local queue size to 40% of max. */
#define RX_TS_FPQ_LTOG(rx_ts_info_p) \
    do { \
        int i; \
        struct rx_packet * p; \
        int tsize = MIN((rx_ts_info_p)->_FPQ.len, (rx_ts_info_p)->_FPQ.len - rx_TSFPQLocalMax + 3 * rx_TSFPQGlobSize); \
        if (tsize <= 0) break; \
        for (i=0,p=opr_queue_Last(&((rx_ts_info_p)->_FPQ.queue), \
                                  struct rx_packet, entry); \
             i < tsize; i++,p=opr_queue_Prev(&p->entry, \
                                             struct rx_packet, entry )); \
        opr_queue_SplitAfterPrepend(&((rx_ts_info_p)->_FPQ.queue), \
                                    &rx_freePacketQueue, &p->entry); \
        (rx_ts_info_p)->_FPQ.len -= tsize; \
        rx_nFreePackets += tsize; \
        (rx_ts_info_p)->_FPQ.ltog_ops++; \
        (rx_ts_info_p)->_FPQ.ltog_xfer += tsize; \
        if ((rx_ts_info_p)->_FPQ.delta) { \
            MUTEX_ENTER(&rx_packets_mutex); \
            RX_TS_FPQ_COMPUTE_LIMITS; \
            MUTEX_EXIT(&rx_packets_mutex); \
            (rx_ts_info_p)->_FPQ.delta = 0; \
        } \
    } while(0)
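/*
 * Worked example (illustrative): with the default rx_TSFPQLocalMax = 15 and
 * rx_TSFPQGlobSize = 3, a local queue of len = 50 gives
 * tsize = MIN(50, 50 - 15 + 9) = 44, leaving 15 - 3*3 = 6 packets locally,
 * i.e. 40% of the local max, matching the comment above.
 */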
/* same as RX_TS_FPQ_LTOG, except the caller has direct control over the
 * number to transfer */
#define RX_TS_FPQ_LTOG2(rx_ts_info_p,num_transfer) \
    do { \
        int i; \
        struct rx_packet * p; \
        if (num_transfer <= 0) break; \
        for (i=0,p=opr_queue_Last(&((rx_ts_info_p)->_FPQ.queue), \
                                  struct rx_packet, entry ); \
             i < (num_transfer); \
             i++,p=opr_queue_Prev(&p->entry, struct rx_packet, entry )); \
        opr_queue_SplitAfterPrepend(&((rx_ts_info_p)->_FPQ.queue), \
                                    &rx_freePacketQueue, &p->entry); \
        (rx_ts_info_p)->_FPQ.len -= (num_transfer); \
        rx_nFreePackets += (num_transfer); \
        (rx_ts_info_p)->_FPQ.ltog_ops++; \
        (rx_ts_info_p)->_FPQ.ltog_xfer += (num_transfer); \
        if ((rx_ts_info_p)->_FPQ.delta) { \
            MUTEX_ENTER(&rx_packets_mutex); \
            RX_TS_FPQ_COMPUTE_LIMITS; \
            MUTEX_EXIT(&rx_packets_mutex); \
            (rx_ts_info_p)->_FPQ.delta = 0; \
        } \
    } while(0)
/* move packets from the global to the local (thread-specific) free packet
 * queue.  rx_freePktQ_lock must be held. */
#define RX_TS_FPQ_GTOL(rx_ts_info_p) \
    do { \
        int i, tsize; \
        struct rx_packet * p; \
        tsize = (rx_TSFPQGlobSize <= rx_nFreePackets) ? \
                 rx_TSFPQGlobSize : rx_nFreePackets; \
        for (i=0, \
             p=opr_queue_First(&rx_freePacketQueue, struct rx_packet, entry); \
             i < tsize; \
             i++,p=opr_queue_Next(&p->entry, struct rx_packet, entry)); \
        opr_queue_SplitBeforeAppend(&rx_freePacketQueue, \
                                    &((rx_ts_info_p)->_FPQ.queue), &p->entry); \
        (rx_ts_info_p)->_FPQ.len += i; \
        rx_nFreePackets -= i; \
        (rx_ts_info_p)->_FPQ.gtol_ops++; \
        (rx_ts_info_p)->_FPQ.gtol_xfer += i; \
    } while(0)
/* same as RX_TS_FPQ_GTOL, except the caller has direct control over the
 * number to transfer */
#define RX_TS_FPQ_GTOL2(rx_ts_info_p,num_transfer) \
    do { \
        int i, tsize; \
        struct rx_packet * p; \
        tsize = (num_transfer); \
        if (tsize > rx_nFreePackets) tsize = rx_nFreePackets; \
        for (i=0, \
             p=opr_queue_First(&rx_freePacketQueue, struct rx_packet, entry); \
             i < tsize; \
             i++, p=opr_queue_Next(&p->entry, struct rx_packet, entry)); \
        opr_queue_SplitBeforeAppend(&rx_freePacketQueue, \
                                    &((rx_ts_info_p)->_FPQ.queue), &p->entry); \
        (rx_ts_info_p)->_FPQ.len += i; \
        rx_nFreePackets -= i; \
        (rx_ts_info_p)->_FPQ.gtol_ops++; \
        (rx_ts_info_p)->_FPQ.gtol_xfer += i; \
    } while(0)
/* checkout a packet from the thread-specific free packet queue */
#define RX_TS_FPQ_CHECKOUT(rx_ts_info_p,p) \
    do { \
        (p) = opr_queue_First(&((rx_ts_info_p)->_FPQ.queue), \
                              struct rx_packet, entry); \
        opr_queue_Remove(&p->entry); \
        RX_FPQ_MARK_USED(p); \
        (rx_ts_info_p)->_FPQ.len--; \
        (rx_ts_info_p)->_FPQ.checkout_ops++; \
        (rx_ts_info_p)->_FPQ.checkout_xfer++; \
    } while(0)
/* checkout multiple packets from the thread-specific free packet queue.
 * num_transfer must be a variable.
 */
#define RX_TS_FPQ_QCHECKOUT(rx_ts_info_p,num_transfer,q) \
    do { \
        int i; \
        struct rx_packet *p; \
        if (num_transfer > (rx_ts_info_p)->_FPQ.len) num_transfer = (rx_ts_info_p)->_FPQ.len; \
        for (i=0, p=opr_queue_First(&((rx_ts_info_p)->_FPQ.queue), \
                                    struct rx_packet, entry); \
             i < num_transfer; \
             i++, p=opr_queue_Next(&p->entry, struct rx_packet, entry)) { \
            RX_FPQ_MARK_USED(p); \
        } \
        opr_queue_SplitBeforeAppend(&((rx_ts_info_p)->_FPQ.queue),(q), \
                                    &((p)->entry)); \
        (rx_ts_info_p)->_FPQ.len -= num_transfer; \
        (rx_ts_info_p)->_FPQ.checkout_ops++; \
        (rx_ts_info_p)->_FPQ.checkout_xfer += num_transfer; \
    } while(0)
/* check a packet into the thread-specific free packet queue */
#define RX_TS_FPQ_CHECKIN(rx_ts_info_p,p) \
    do { \
        opr_queue_Prepend(&((rx_ts_info_p)->_FPQ.queue), &((p)->entry)); \
        RX_FPQ_MARK_FREE(p); \
        (rx_ts_info_p)->_FPQ.len++; \
        (rx_ts_info_p)->_FPQ.checkin_ops++; \
        (rx_ts_info_p)->_FPQ.checkin_xfer++; \
    } while(0)
/* check multiple packets into the thread-specific free packet queue */
/* num_transfer must equal the length of (q); it is not a means of checking
 * in part of (q).  Passing num_transfer just saves us instructions, since
 * the caller already knows the length of (q) for other reasons. */
#define RX_TS_FPQ_QCHECKIN(rx_ts_info_p,num_transfer,q) \
    do { \
        struct opr_queue *cur; \
        for (opr_queue_Scan((q), cur)) { \
            RX_FPQ_MARK_FREE(opr_queue_Entry(cur, struct rx_packet, entry)); \
        } \
        opr_queue_SplicePrepend(&((rx_ts_info_p)->_FPQ.queue),(q)); \
        (rx_ts_info_p)->_FPQ.len += (num_transfer); \
        (rx_ts_info_p)->_FPQ.checkin_ops++; \
        (rx_ts_info_p)->_FPQ.checkin_xfer += (num_transfer); \
    } while(0)
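/*
 * Usage sketch (illustrative): a typical fast-path allocation under the
 * two-tier scheme checks a packet out of the local queue, refilling from
 * the global queue only when the local queue is empty:
 *
 *     struct rx_ts_info_t *ts_info;
 *     struct rx_packet *p;
 *     RX_TS_INFO_GET(ts_info);
 *     if (ts_info->_FPQ.len == 0) {
 *         MUTEX_ENTER(&rx_freePktQ_lock);
 *         RX_TS_FPQ_GTOL(ts_info);        pulls up to rx_TSFPQGlobSize packets
 *         MUTEX_EXIT(&rx_freePktQ_lock);
 *     }
 *     RX_TS_FPQ_CHECKOUT(ts_info, p);
 *     ... use p, then return it ...
 *     RX_TS_FPQ_CHECKIN(ts_info, p);
 */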
#endif /* AFS_PTHREAD_ENV && !KERNEL */

/* Number of free packets */
EXT int rx_nFreePackets GLOBALSINIT(0);
EXT int rxi_NeedMorePackets GLOBALSINIT(0);
EXT int rx_packetReclaims GLOBALSINIT(0);

/* largest packet which we can safely receive, initialized to the AFS 3.2
 * value.  This is provided for backward compatibility with peers which may
 * be unable to swallow anything larger.  THIS MUST NEVER DECREASE WHILE AN
 * APPLICATION IS RUNNING! */
EXT afs_uint32 rx_maxReceiveSize GLOBALSINIT(_OLD_MAX_PACKET_SIZE * RX_MAX_FRAGS
                                             + UDP_HDR_SIZE * (RX_MAX_FRAGS - 1));

/* this is the maximum packet size that the user wants us to receive */
/* this is set by rxTune if required */
EXT afs_uint32 rx_maxReceiveSizeUser GLOBALSINIT(0xffffffff);

/* rx_MyMaxSendSize is the size of the largest packet we will send,
 * including the RX header, just as rx_maxReceiveSize is the largest
 * we will receive, including the rx header.
 */
EXT afs_uint32 rx_MyMaxSendSize GLOBALSINIT(8588);

/* Maximum size of a jumbo datagram we can receive */
EXT afs_uint32 rx_maxJumboRecvSize GLOBALSINIT(RX_MAX_PACKET_SIZE);

/* need this to permit progs to run on AIX systems */
EXT int (*rxi_syscallp) (afs_uint32 a3, afs_uint32 a4, void *a5) GLOBALSINIT(0);

/* List of free queue entries */
EXT struct rx_serverQueueEntry *rx_FreeSQEList GLOBALSINIT(0);
#ifdef RX_ENABLE_LOCKS
EXT afs_kmutex_t freeSQEList_lock;
#endif

/* List of free call structures */
EXT struct opr_queue rx_freeCallQueue;
#ifdef RX_ENABLE_LOCKS
EXT afs_kmutex_t rx_freeCallQueue_lock;
#endif
EXT afs_int32 rxi_nCalls GLOBALSINIT(0);

/* Port requested at rx_Init.  If this is zero, the actual port used will be
 * different--but it will only be used for client operations.  If non-zero,
 * server provided services may use the same port. */
EXT u_short rx_port;

#if !defined(KERNEL) && !defined(AFS_PTHREAD_ENV)
/* 32-bit select Mask for rx_Listener. */
EXT fd_set rx_selectMask;
EXT osi_socket rx_maxSocketNumber;      /* Maximum socket number in the select mask. */
/* Minimum socket number in the select mask. */
EXT osi_socket rx_minSocketNumber GLOBALSINIT(0x7fffffff);
#endif

/* This is actually the minimum number of packets that must remain free,
 * overall, immediately after a packet of the requested class has been
 * allocated.  *WARNING* These must be assigned with a great deal of care.
 * In order, these are receive quota, send quota, special quota, receive
 * continuation quota, and send continuation quota. */
#define RX_PACKET_QUOTAS {1, 10, 0, 1, 10}
/* value large enough to guarantee that no allocation fails due to
 * RX_PACKET_QUOTAS.  Make it a little bigger, just for fun */
#define RX_MAX_QUOTA 15         /* part of min packet computation */
EXT int rx_packetQuota[RX_N_PACKET_CLASSES] GLOBALSINIT(RX_PACKET_QUOTAS);
EXT int meltdown_1pkt GLOBALSINIT(1);   /* prefer to schedule single-packet calls */
EXT int rxi_md2cnt GLOBALSINIT(0);      /* counter of skipped calls */
EXT int rxi_2dchoice GLOBALSINIT(1);    /* keep track of another call to schedule */

/* quota system: each attached server process must be able to make
 * progress to avoid system deadlock, so we ensure that we can always
 * handle the arrival of the next unacknowledged data packet for an
 * attached call.  rxi_dataQuota gives the max # of packets that must be
 * reserved for active calls for them to be able to make progress, which is
 * essentially enough to queue up a window-full of packets (the first packet
 * may be missing, so these may not get read) + the # of packets the thread
 * may use before reading all of its input (# free must be one more than send
 * packet quota).  Thus, each thread allocates rx_maxReceiveWindow+1 (max
 * queued packets) + an extra for sending data.  The system also reserves
 * RX_MAX_QUOTA (must be more than RX_PACKET_QUOTA[i], which is 10), so that
 * the extra packet can be sent (must be under the system-wide send packet
 * quota to send any packets) */
/* # to reserve so that thread with input can still make calls (send packets)
 * without blocking */
EXT int rxi_dataQuota GLOBALSINIT(RX_MAX_QUOTA);        /* packets to reserve for active threads */
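/*
 * Worked example (derived from the comment above; default window assumed):
 * with rx_maxReceiveWindow = 32, each server thread accounts for 32 + 1
 * queued packets plus one extra for sending, i.e. 34 packets, on top of
 * the system-wide RX_MAX_QUOTA = 15 reserve.
 */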

EXT afs_int32 rxi_availProcs GLOBALSINIT(0);    /* number of threads in the pool */
EXT afs_int32 rxi_totalMin GLOBALSINIT(0);      /* Sum(minProcs) forall services */
EXT afs_int32 rxi_minDeficit GLOBALSINIT(0);    /* number of procs needed to handle all minProcs */

EXT afs_uint32 rx_nextCid;              /* Next connection call id */
EXT afs_uint32 rx_epoch;                /* Initialization time of rx */
#ifdef RX_ENABLE_LOCKS
EXT afs_kcondvar_t rx_waitingForPackets_cv;
#endif
EXT char rx_waitingForPackets;  /* Processes set and wait on this variable when waiting for packet buffers */

EXT struct rx_peer **rx_peerHashTable;
EXT struct rx_connection **rx_connHashTable;
EXT struct rx_connection *rx_connCleanup_list GLOBALSINIT(0);
EXT afs_uint32 rx_hashTableSize GLOBALSINIT(257);       /* Prime number */
#ifdef RX_ENABLE_LOCKS
EXT afs_kmutex_t rx_peerHashTable_lock;
EXT afs_kmutex_t rx_connHashTable_lock;
#endif /* RX_ENABLE_LOCKS */

#define CONN_HASH(host, port, cid, epoch, type) ((((cid)>>RX_CIDSHIFT)%rx_hashTableSize))

#define PEER_HASH(host, port)  ((host ^ port) % rx_hashTableSize)
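/*
 * Note (illustrative): CONN_HASH ignores every parameter except cid, so
 * connections from different hosts with the same connection id share a
 * bucket and must be distinguished by walking the chain, e.g.
 *
 *     bucket = CONN_HASH(host, port, cid, epoch, type);
 *     conn = rx_connHashTable[bucket];   then walk the chain
 */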

/* Forward definitions of internal procedures */

#define rxi_AllocSecurityObject() rxi_Alloc(sizeof(struct rx_securityClass))
#define rxi_FreeSecurityObject(obj) rxi_Free(obj, sizeof(struct rx_securityClass))
#define rxi_AllocService()      rxi_Alloc(sizeof(struct rx_service))
#define rxi_FreeService(obj) \
do { \
    MUTEX_DESTROY(&(obj)->svc_data_lock); \
    rxi_Free((obj), sizeof(struct rx_service)); \
} while (0)
#define rxi_AllocPeer()         rxi_Alloc(sizeof(struct rx_peer))
#define rxi_FreePeer(peer)      rxi_Free(peer, sizeof(struct rx_peer))
#define rxi_AllocConnection()   rxi_Alloc(sizeof(struct rx_connection))
#define rxi_FreeConnection(conn) (rxi_Free(conn, sizeof(struct rx_connection)))

EXT afs_int32 rx_stats_active GLOBALSINIT(1);   /* boolean - rx statistics gathering */

#ifndef KERNEL
/* Some debugging stuff */
EXT FILE *rx_debugFile;         /* Set by the user to a stdio file for debugging output */
EXT FILE *rxevent_debugFile;    /* Set to an stdio descriptor for event logging to that file */
#endif

#ifdef RXDEBUG
# define rx_Log rx_debugFile
# ifdef AFS_NT40_ENV
EXT int rxdebug_active;
#  define dpf(args) do { if (rxdebug_active) rxi_DebugPrint args; } while (0)
# else
#  ifdef DPF_FSLOG
#   include <afs/afsutil.h>
#   define dpf(args) FSLog args
#  else
#   define dpf(args) do { if (rx_debugFile) rxi_DebugPrint args; } while (0)
#  endif
# endif
# define rx_Log_event rxevent_debugFile
#else
# define dpf(args)
#endif /* RXDEBUG */
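/*
 * Usage sketch (illustrative): dpf takes its argument list in double
 * parentheses so the whole printf-style list passes through the single
 * macro parameter and lands after rxi_DebugPrint unchanged:
 *
 *     dpf(("rx: conn %p dead after %d seconds\n", conn, rx_connDeadTime));
 *
 * When RXDEBUG is not defined, the call compiles away entirely.
 */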

EXT char *rx_packetTypes[RX_N_PACKET_TYPES] GLOBALSINIT(RX_PACKET_TYPES);       /* Strings defined in rx.h */

#ifndef KERNEL
/*
 * Counter used to implement connection specific data
 */
EXT int rxi_keyCreate_counter GLOBALSINIT(0);
/*
 * Array of function pointers used to destroy connection specific data
 */
EXT rx_destructor_t *rxi_keyCreate_destructor GLOBALSINIT(NULL);
#ifdef RX_ENABLE_LOCKS
EXT afs_kmutex_t rxi_keyCreate_lock;
#endif /* RX_ENABLE_LOCKS */
#endif /* !KERNEL */

/*
 * SERVER ONLY: Thresholds used to throttle error replies to looping
 * clients.  When consecutive calls are aborting with the same error, the
 * server throttles the client by waiting before sending error messages.
 * Disabled if abort thresholds are zero.
 */
EXT int rxi_connAbortThreshhold GLOBALSINIT(0);
EXT int rxi_connAbortDelay GLOBALSINIT(3000);
EXT int rxi_callAbortThreshhold GLOBALSINIT(0);
EXT int rxi_callAbortDelay GLOBALSINIT(3000);

/*
 * Thread specific thread ID used to implement LWP_Index().
 */

#if defined(AFS_PTHREAD_ENV)
EXT int rxi_fcfs_thread_num GLOBALSINIT(0);
EXT pthread_key_t rx_thread_id_key;
#else
#define rxi_fcfs_thread_num (0)
#endif

#if defined(RX_ENABLE_LOCKS)
EXT afs_kmutex_t rx_waiting_mutex POSTAMBLE;    /* used to protect waiting counters */
EXT afs_kmutex_t rx_quota_mutex POSTAMBLE;      /* used to protect quota counters */
EXT afs_kmutex_t rx_pthread_mutex POSTAMBLE;    /* used to protect pthread counters */
EXT afs_kmutex_t rx_packets_mutex POSTAMBLE;    /* used to protect packet counters */
EXT afs_kmutex_t rx_refcnt_mutex POSTAMBLE;     /* used to protect conn/call ref counts */
#endif

EXT int rx_enable_stats GLOBALSINIT(0);

/*
 * Set this flag to enable the listener thread to trade places with an idle
 * worker thread to move the context switch from listener to worker out of
 * the request path.
 */
EXT int rx_enable_hot_thread GLOBALSINIT(0);

EXT int RX_IPUDP_SIZE GLOBALSINIT(_RX_IPUDP_SIZE);
#endif /* AFS_RX_GLOBALS_H */