/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License. For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

#ifndef OPENAFS_RX_CALL_H
#define OPENAFS_RX_CALL_H 1

/*
 * The following fields are accessed while the call is unlocked.
 * These fields are used by the application thread to marshall
 * and unmarshall RPC data. The only time they may be changed by
 * other threads is when the RX_CALL_IOVEC_WAIT flag is set.
 *
 * NOTE: Ensure that this structure is padded to a double word boundary
 * to avoid problems with other threads accessing items stored beside it
 * in the call structure.
 */
struct rx_call_appl {
    struct opr_queue iovq;		/* readv/writev packet queue */
    u_short nLeft;			/* Number bytes left in first receive packet */
    u_short curvec;			/* current iovec in currentPacket */
    u_short curlen;			/* bytes remaining in curvec */
    u_short nFree;			/* Number bytes free in last send packet */
    struct rx_packet *currentPacket;	/* Current packet being assembled or being read */
    char *curpos;			/* current position in curvec */
    int mode;				/* Mode of call */
    int padding;			/* Pad to double word */
    afs_uint64 bytesSent;		/* Number bytes sent */
    afs_uint64 bytesRcvd;		/* Number bytes received */
};
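
/*
 * Illustrative sketch (not part of the original header): the fields above
 * are driven by the application-thread read/write path. Assuming a call
 * obtained from rx_NewCall(), rx_Write() marshals data into currentPacket
 * and advances bytesSent, while rx_Read() drains received packets and
 * advances bytesRcvd:
 *
 *	char buf[1024];
 *	int nbytes;
 *
 *	nbytes = rx_Write(call, buf, sizeof(buf));  // send side of the RPC
 *	...
 *	nbytes = rx_Read(call, buf, sizeof(buf));   // receive side of the RPC
 */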

/* Call structure: only instantiated for active calls and dallying
 * server calls. The permanent call state (i.e. the call number as
 * well as state shared with other calls associated with this
 * connection) is maintained in the connection structure. */

#ifdef KDUMP_RX_LOCK
struct rx_call_rx_lock {
#else
struct rx_call {
#endif
    struct opr_queue entry;	/* Call can be on various queues (one-at-a-time) */
    struct opr_queue tq;	/* Transmit packet queue */
    struct opr_queue rq;	/* Receive packet queue */
    struct rx_call_appl app;	/* Data private to the application thread */
    u_char channel;		/* Index of call, within connection */
    u_char state;		/* Current call state as defined in rx.h */
#ifdef RX_ENABLE_LOCKS
    afs_kmutex_t lock;		/* lock covers data as well as mutexes. */
    afs_kmutex_t *call_queue_lock;	/* points to lock for queue we're on,
					 * if any. */
    afs_kcondvar_t cv_twind;
    afs_kcondvar_t cv_rq;
    afs_kcondvar_t cv_tq;
#endif
#ifdef KDUMP_RX_LOCK
    struct rx_connection_rx_lock *conn;	/* Parent connection for call */
#else
    struct rx_connection *conn;	/* Parent connection for this call */
#endif
    afs_uint32 *callNumber;	/* Pointer to call number field within connection */
    afs_uint32 flags;		/* Some random flags */
    u_char localStatus;		/* Local user status sent out of band */
    u_char remoteStatus;	/* Remote user status received out of band */
    afs_int32 error;		/* Error condition for this call */
    afs_uint32 timeout;		/* High level timeout for this call */
    afs_uint32 rnext;		/* Next sequence number expected to be read by rx_ReadData */
    afs_uint32 rprev;		/* Previous packet received; used for deciding what the next packet to be received should be, in order to decide whether a negative acknowledge should be sent */
    afs_uint32 rwind;		/* The receive window: the peer must not send packets with sequence numbers >= rnext+rwind */
    afs_uint32 tfirst;		/* First unacknowledged transmit packet number */
    afs_uint32 tnext;		/* Next transmit sequence number to use */
    afs_uint32 tprev;		/* Last packet that we saw an ack for */
    u_short twind;		/* The transmit window: we cannot assign a sequence number to a packet >= tfirst + twind */
    u_short cwind;		/* The congestion window */
    u_short nSoftAcked;		/* Number soft acked transmit packets */
    u_short nextCwind;		/* The congestion window after recovery */
    u_short nCwindAcks;		/* Number acks received at current cwind */
    u_short ssthresh;		/* The slow start threshold */
    u_short nDgramPackets;	/* Packets per AFS 3.5 jumbogram */
    u_short nAcks;		/* The number of consecutive acks */
    u_short nNacks;		/* Number packets acked that follow the
				 * first negatively acked packet */
    u_short nSoftAcks;		/* The number of delayed soft acks */
    u_short nHardAcks;		/* The number of delayed hard acks */
    u_short congestSeq;		/* Peer's congestion sequence counter */
    int rtt;
    int rtt_dev;
    struct clock rto;		/* The round trip timeout calculated for this call */
    struct rxevent *resendEvent;	/* If this is non-Null, there is a retransmission event pending */
    struct rxevent *keepAliveEvent;	/* Scheduled periodically in active calls to keep call alive */
    struct rxevent *growMTUEvent;	/* Scheduled periodically in active calls to discover true maximum MTU */
    struct rxevent *delayedAckEvent;	/* Scheduled after all packets are received to send an ack if a reply or new call is not generated soon */
    struct clock delayedAckTime;	/* Time that next delayed ack was scheduled for */
    struct rxevent *delayedAbortEvent;	/* Scheduled to throttle looping client */
    int abortCode;		/* error code from last RPC */
    int abortCount;		/* number of times last error was sent */
    u_int lastSendTime;		/* Last time a packet was sent on this call */
    u_int lastReceiveTime;	/* Last time a packet was received for this call */
    void (*arrivalProc) (struct rx_call * call, void * mh, int index);	/* Procedure to call when reply is received */
    void *arrivalProcHandle;	/* Handle to pass to replyFunc */
    int arrivalProcArg;		/* Additional arg to pass to reply Proc */
    afs_uint32 lastAcked;	/* last packet "hard" acked by receiver */
    afs_uint32 startWait;	/* time server began waiting for input data/send quota */
    struct clock traceWait;	/* time server began waiting for input data/send quota */
    struct clock traceStart;	/* time the call started running */
    u_short MTU;		/* size of packets currently sending */
#ifdef RX_ENABLE_LOCKS
    short refCount;		/* Used to keep calls from disappearing
				 * when we get them from a queue. (rx_refcnt_lock) */
#endif				/* RX_ENABLE_LOCKS */
    /* Call refcount modifiers */
#define RX_CALL_REFCOUNT_BEGIN  0	/* GetCall/NewCall/EndCall */
#define RX_CALL_REFCOUNT_RESEND 1	/* resend event */
#define RX_CALL_REFCOUNT_DELAY  2	/* delayed ack */
#define RX_CALL_REFCOUNT_ALIVE  3	/* keep alive event */
#define RX_CALL_REFCOUNT_PACKET 4	/* waiting for packets. */
#define RX_CALL_REFCOUNT_SEND   5	/* rxi_Send */
#define RX_CALL_REFCOUNT_ABORT  7	/* delayed abort */
#define RX_CALL_REFCOUNT_MTU    8	/* grow mtu event */
#define RX_CALL_REFCOUNT_MAX    9	/* array size. */
#ifdef RX_REFCOUNT_CHECK
    short refCDebug[RX_CALL_REFCOUNT_MAX];
#endif				/* RX_REFCOUNT_CHECK */

    /*
     * iov, iovNBytes, iovMax, and iovNext are set in rxi_ReadvProc()
     * and adjusted by rxi_FillReadVec(). iov does not own the buffers
     * it refers to. The buffers belong to the packets stored in iovq.
     * Only one call to rx_ReadvProc() can be active at a time
     * (see the usage sketch following this structure definition).
     */

    int iovNBytes;		/* byte count for current iovec */
    int iovMax;			/* number elements in current iovec */
    int iovNext;		/* next entry in current iovec */
    struct iovec *iov;		/* current iovec */
    struct clock queueTime;	/* time call was queued */
    struct clock startTime;	/* time call was started */

    u_short tqWaiters;

    struct rx_packet *xmitList[RX_MAXACKS];	/* Can't xmit more than we ack */
						/* Protected by setting RX_CALL_TQ_BUSY */
#ifdef RXDEBUG_PACKET
    u_short tqc;		/* packet count in tq */
    u_short rqc;		/* packet count in rq */
    u_short iovqc;		/* packet count in iovq */

#ifdef KDUMP_RX_LOCK
    struct rx_call_rx_lock *allNextp;
#else
    struct rx_call *allNextp;
#endif
    afs_uint32 call_id;
#endif
#ifdef AFS_RXERRQ_ENV
    int neterr_gen;
#endif
};
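
/*
 * Illustrative sketch (not part of the original header) of the readv path
 * that drives the iov* members above: rx_Readv() fills an iovec array that
 * points directly into received packet buffers; the buffers stay owned by
 * the packets queued on app.iovq, so they must be consumed before the next
 * read on this call. (The buffer count RX_MAXIOVECS and the byte count
 * 'want' are used here purely for illustration.)
 *
 *	struct iovec riov[RX_MAXIOVECS];
 *	int nio = 0, nbytes, want = 4096;
 *
 *	nbytes = rx_Readv(call, riov, &nio, RX_MAXIOVECS, want);
 *	// riov[0..nio-1] now reference packet data totalling nbytes bytes.
 */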

/* Only include this once, even when re-loading for kdump. */
#ifndef _CALL_REF_DEFINED_
#define _CALL_REF_DEFINED_

#ifdef RX_ENABLE_LOCKS

# define CALL_HOLD(call, type) do { \
	MUTEX_ENTER(&rx_refcnt_mutex); \
	CALL_HOLD_R(call, type); \
	MUTEX_EXIT(&rx_refcnt_mutex); \
    } while(0)
# define CALL_RELE(call, type) do { \
	MUTEX_ENTER(&rx_refcnt_mutex); \
	CALL_RELE_R(call, type); \
	MUTEX_EXIT(&rx_refcnt_mutex); \
    } while(0)

#ifdef RX_REFCOUNT_CHECK
/* RX_REFCOUNT_CHECK is used to test for call refcount leaks by event
 * type.
 */
extern int rx_callHoldType;
#define CALL_HOLD_R(call, type) do { \
	call->refCount++; \
	call->refCDebug[type]++; \
	if (call->refCDebug[type] > 50) { \
	    rx_callHoldType = type; \
	    osi_Panic("Huge call refCount"); \
	} \
    } while (0)
#define CALL_RELE_R(call, type) do { \
	call->refCount--; \
	call->refCDebug[type]--; \
	if (call->refCDebug[type] < 0) { \
	    rx_callHoldType = type; \
	    osi_Panic("Negative call refCount"); \
	} \
    } while (0)
#else /* RX_REFCOUNT_CHECK */
#define CALL_HOLD_R(call, type) call->refCount++
#define CALL_RELE_R(call, type) call->refCount--
#endif /* RX_REFCOUNT_CHECK */

#else /* RX_ENABLE_LOCKS */
#define CALL_HOLD(call, type)
#define CALL_RELE(call, type)
#define CALL_RELE_R(call, type)
#endif /* RX_ENABLE_LOCKS */
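
/*
 * Illustrative sketch (not part of the original header): code that needs a
 * call to stay around while it works on it takes a reference tagged with the
 * reason, so RX_REFCOUNT_CHECK can attribute leaks to an event type. Around
 * a delayed-ack event, for example, one would expect roughly:
 *
 *	CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);   // pin call for the event
 *	...                                        // run the delayed-ack work
 *	CALL_RELE(call, RX_CALL_REFCOUNT_DELAY);   // drop the reference
 *
 * The _R variants are for callers that already hold rx_refcnt_mutex.
 */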

#endif /* _CALL_REF_DEFINED_ */

#endif /* OPENAFS_RX_CALL_H */