Commit | Line | Data |
---|---|---|
805e021f CE |
1 | /* |
2 | * Copyright 2000, International Business Machines Corporation and others. | |
3 | * All Rights Reserved. | |
4 | * | |
5 | * This software has been released under the terms of the IBM Public | |
6 | * License. For details, see the LICENSE file in the top-level source | |
7 | * directory or online at http://www.openafs.org/dl/license10.html | |
8 | */ | |
9 | ||
10 | #include <afsconfig.h> | |
11 | #include "afs/param.h" | |
12 | ||
13 | #include "afs/sysincludes.h" /* Standard vendor system headers */ | |
14 | #ifdef AFS_ALPHA_ENV | |
15 | #undef kmem_alloc | |
16 | #undef kmem_free | |
17 | #undef mem_alloc | |
18 | #undef mem_free | |
19 | #endif /* AFS_ALPHA_ENV */ | |
20 | #include "afsincludes.h" /* Afs-based standard headers */ | |
21 | #include "afs/afs_stats.h" /* statistics */ | |
22 | #include "afs_prototypes.h" | |
23 | ||
24 | extern int cacheDiskType; | |
25 | ||
26 | #ifndef AFS_NOSTATS | |
#ifndef AFS_NOSTATS
/*!
 * Fold the outcome of one store transfer into the global xfer statistics.
 *
 * \param code          result of the transfer; detailed tallies (bytes,
 *                      buckets, times) are only recorded when code == 0
 * \param idx           index into afs_stats_cmfullperf.rpc.fsXferTimes[]
 * \param xferStartTime when the transfer began; the stop time is sampled here
 * \param bytesToXfer   number of bytes we intended to move (bucketed below)
 * \param bytesXferred  number of bytes actually moved
 */
static void
FillStoreStats(int code, int idx, osi_timeval_t xferStartTime,
	       afs_size_t bytesToXfer, afs_size_t bytesXferred)
{
    struct afs_stats_xferData *xferP;
    osi_timeval_t xferStopTime;
    osi_timeval_t elapsedTime;

    xferP = &(afs_stats_cmfullperf.rpc.fsXferTimes[idx]);
    osi_GetuTime(&xferStopTime);
    (xferP->numXfers)++;
    if (!code) {
	(xferP->numSuccesses)++;
	/* sumBytes is kept in units of 1 KiB; the sub-KiB remainder is
	 * carried over in afs_stats_XferSumBytes[idx] (low 10 bits) so no
	 * bytes are lost to truncation across calls. */
	afs_stats_XferSumBytes[idx] += bytesXferred;
	(xferP->sumBytes) += (afs_stats_XferSumBytes[idx] >> 10);
	afs_stats_XferSumBytes[idx] &= 0x3FF;
	if (bytesXferred < xferP->minBytes)
	    xferP->minBytes = bytesXferred;
	if (bytesXferred > xferP->maxBytes)
	    xferP->maxBytes = bytesXferred;

	/*
	 * Tally the size of the object. Note: we tally the actual size,
	 * NOT the number of bytes that made it out over the wire.
	 */
	if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET0) (xferP->count[0])++;
	else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET1) (xferP->count[1])++;
	else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET2) (xferP->count[2])++;
	else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET3) (xferP->count[3])++;
	else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET4) (xferP->count[4])++;
	else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET5) (xferP->count[5])++;
	else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET6) (xferP->count[6])++;
	else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET7) (xferP->count[7])++;
	else
	    (xferP->count[8])++;

	/* Accumulate elapsed-time sum / sum-of-squares and track extremes. */
	afs_stats_GetDiff(elapsedTime, xferStartTime, xferStopTime);
	afs_stats_AddTo((xferP->sumTime), elapsedTime);
	afs_stats_SquareAddTo((xferP->sqrTime), elapsedTime);
	if (afs_stats_TimeLessThan(elapsedTime, (xferP->minTime))) {
	    afs_stats_TimeAssign((xferP->minTime), elapsedTime);
	}
	if (afs_stats_TimeGreaterThan(elapsedTime, (xferP->maxTime))) {
	    afs_stats_TimeAssign((xferP->maxTime), elapsedTime);
	}
    }
}
#endif /* AFS_NOSTATS */
74 | #endif /* AFS_NOSTATS */ | |
75 | ||
76 | /* rock and operations for RX_FILESERVER */ | |
77 | ||
78 | ||
79 | ||
80 | afs_int32 | |
81 | rxfs_storeUfsPrepare(void *r, afs_uint32 size, afs_uint32 *tlen) | |
82 | { | |
83 | *tlen = (size > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : size); | |
84 | return 0; | |
85 | } | |
86 | ||
87 | afs_int32 | |
88 | rxfs_storeMemPrepare(void *r, afs_uint32 size, afs_uint32 *tlen) | |
89 | { | |
90 | afs_int32 code; | |
91 | struct rxfs_storeVariables *v = (struct rxfs_storeVariables *) r; | |
92 | ||
93 | RX_AFS_GUNLOCK(); | |
94 | code = rx_WritevAlloc(v->call, v->tiov, &v->tnio, RX_MAXIOVECS, size); | |
95 | RX_AFS_GLOCK(); | |
96 | if (code <= 0) { | |
97 | code = rx_Error(v->call); | |
98 | if (!code) | |
99 | code = -33; | |
100 | } | |
101 | else { | |
102 | *tlen = code; | |
103 | code = 0; | |
104 | } | |
105 | return code; | |
106 | } | |
107 | ||
108 | afs_int32 | |
109 | rxfs_storeUfsRead(void *r, struct osi_file *tfile, afs_uint32 offset, | |
110 | afs_uint32 tlen, afs_uint32 *bytesread) | |
111 | { | |
112 | afs_int32 code; | |
113 | struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)r; | |
114 | ||
115 | *bytesread = 0; | |
116 | code = afs_osi_Read(tfile, -1, v->tbuffer, tlen); | |
117 | if (code < 0) | |
118 | return EIO; | |
119 | *bytesread = code; | |
120 | if (code == tlen) | |
121 | return 0; | |
122 | #if defined(KERNEL_HAVE_UERROR) | |
123 | if (getuerror()) | |
124 | return EIO; | |
125 | #endif | |
126 | return 0; | |
127 | } | |
128 | ||
129 | afs_int32 | |
130 | rxfs_storeMemRead(void *r, struct osi_file *tfile, afs_uint32 offset, | |
131 | afs_uint32 tlen, afs_uint32 *bytesread) | |
132 | { | |
133 | afs_int32 code; | |
134 | struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)r; | |
135 | struct memCacheEntry *mceP = (struct memCacheEntry *)tfile; | |
136 | ||
137 | *bytesread = 0; | |
138 | code = afs_MemReadvBlk(mceP, offset, v->tiov, v->tnio, tlen); | |
139 | if (code != tlen) | |
140 | return -33; | |
141 | *bytesread = code; | |
142 | return 0; | |
143 | } | |
144 | ||
145 | afs_int32 | |
146 | rxfs_storeMemWrite(void *r, afs_uint32 l, afs_uint32 *byteswritten) | |
147 | { | |
148 | afs_int32 code; | |
149 | struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)r; | |
150 | ||
151 | RX_AFS_GUNLOCK(); | |
152 | code = rx_Writev(v->call, v->tiov, v->tnio, l); | |
153 | RX_AFS_GLOCK(); | |
154 | if (code != l) { | |
155 | code = rx_Error(v->call); | |
156 | return (code ? code : -33); | |
157 | } | |
158 | *byteswritten = code; | |
159 | return 0; | |
160 | } | |
161 | ||
162 | afs_int32 | |
163 | rxfs_storeUfsWrite(void *r, afs_uint32 l, afs_uint32 *byteswritten) | |
164 | { | |
165 | afs_int32 code; | |
166 | struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)r; | |
167 | ||
168 | RX_AFS_GUNLOCK(); | |
169 | code = rx_Write(v->call, v->tbuffer, l); | |
170 | /* writing 0 bytes will | |
171 | * push a short packet. Is that really what we want, just because the | |
172 | * data didn't come back from the disk yet? Let's try it and see. */ | |
173 | RX_AFS_GLOCK(); | |
174 | if (code != l) { | |
175 | code = rx_Error(v->call); | |
176 | return (code ? code : -33); | |
177 | } | |
178 | *byteswritten = code; | |
179 | return 0; | |
180 | } | |
181 | ||
182 | afs_int32 | |
183 | rxfs_storePadd(void *rock, afs_uint32 size) | |
184 | { | |
185 | afs_int32 code = 0; | |
186 | afs_uint32 tlen; | |
187 | struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)rock; | |
188 | ||
189 | if (!v->tbuffer) | |
190 | v->tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ); | |
191 | memset(v->tbuffer, 0, AFS_LRALLOCSIZ); | |
192 | ||
193 | while (size) { | |
194 | tlen = (size > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : size); | |
195 | RX_AFS_GUNLOCK(); | |
196 | code = rx_Write(v->call, v->tbuffer, tlen); | |
197 | RX_AFS_GLOCK(); | |
198 | ||
199 | if (code != tlen) | |
200 | return -33; /* XXX */ | |
201 | size -= tlen; | |
202 | } | |
203 | return 0; | |
204 | } | |
205 | ||
206 | afs_int32 | |
207 | rxfs_storeStatus(void *rock) | |
208 | { | |
209 | struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)rock; | |
210 | ||
211 | if (rx_GetRemoteStatus(v->call) & 1) | |
212 | return 0; | |
213 | return 1; | |
214 | } | |
215 | ||
216 | afs_int32 | |
217 | rxfs_storeClose(void *r, struct AFSFetchStatus *OutStatus, int *doProcessFS) | |
218 | { | |
219 | afs_int32 code; | |
220 | struct AFSVolSync tsync; | |
221 | struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)r; | |
222 | ||
223 | if (!v->call) | |
224 | return -1; | |
225 | RX_AFS_GUNLOCK(); | |
226 | #ifdef AFS_64BIT_CLIENT | |
227 | if (!v->hasNo64bit) | |
228 | code = EndRXAFS_StoreData64(v->call, OutStatus, &tsync); | |
229 | else | |
230 | #endif | |
231 | code = EndRXAFS_StoreData(v->call, OutStatus, &tsync); | |
232 | RX_AFS_GLOCK(); | |
233 | if (!code) | |
234 | *doProcessFS = 1; /* Flag to run afs_ProcessFS() later on */ | |
235 | ||
236 | return code; | |
237 | } | |
238 | ||
239 | afs_int32 | |
240 | rxfs_storeDestroy(void **r, afs_int32 code) | |
241 | { | |
242 | struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)*r; | |
243 | ||
244 | *r = NULL; | |
245 | if (v->call) { | |
246 | RX_AFS_GUNLOCK(); | |
247 | code = rx_EndCall(v->call, code); | |
248 | RX_AFS_GLOCK(); | |
249 | } | |
250 | if (v->tbuffer) | |
251 | osi_FreeLargeSpace(v->tbuffer); | |
252 | if (v->tiov) | |
253 | osi_FreeSmallSpace(v->tiov); | |
254 | osi_FreeSmallSpace(v); | |
255 | return code; | |
256 | } | |
257 | ||
/*!
 * Generic per-chunk store driver: loop prepare/read/write through the
 * protocol-specific storeOps until the whole chunk has been shipped.
 *
 * \param ops          the storeOps table to drive
 * \param rock         opaque per-operation state (rxfs_storeVariables here)
 * \param tdc          the dcache chunk being stored
 * \param shouldwake   if non-NULL and *shouldwake set, wake the waiting
 *                     process once the server reports ready (one-shot)
 * \param bytesXferred accumulates bytes actually written (stats builds only)
 * \return 0 on success, or the first error from any ops callback.
 */
afs_int32
afs_GenericStoreProc(struct storeOps *ops, void *rock,
		     struct dcache *tdc, int *shouldwake,
		     afs_size_t *bytesXferred)
{
    struct rxfs_storeVariables *svar = rock;
    afs_uint32 tlen, bytesread, byteswritten;
    afs_int32 code = 0;
    int offset = 0;
    afs_size_t size;
    struct osi_file *tfile;

    size = tdc->f.chunkBytes;

    tfile = afs_CFileOpen(&tdc->f.inode);
    osi_Assert(tfile);

    while ( size > 0 ) {
	/* how much can we move this iteration? */
	code = (*ops->prepare)(rock, size, &tlen);
	if ( code )
	    break;

	code = (*ops->read)(rock, tfile, offset, tlen, &bytesread);
	if (code)
	    break;

	/* a short read shrinks this iteration's write accordingly */
	tlen = bytesread;
	code = (*ops->write)(rock, tlen, &byteswritten);
	if (code)
	    break;
#ifndef AFS_NOSTATS
	*bytesXferred += byteswritten;
#endif /* AFS_NOSTATS */

	offset += tlen;
	size -= tlen;
	/*
	 * if file has been locked on server, can allow
	 * store to continue
	 */
	if (shouldwake && *shouldwake && ((*ops->status)(rock) == 0)) {
	    *shouldwake = 0;	/* only do this once */
	    afs_wakeup(svar->vcache);
	}
    }
    afs_CFileClose(tfile);

    return code;
}
307 | ||
/* storeOps vtable for a UFS (on-disk) cache; chunk data is staged through
 * v->tbuffer.  Positional initializers are kept for compilers without
 * designated-initializer support. */
static
struct storeOps rxfs_storeUfsOps = {
#ifndef HAVE_STRUCT_LABEL_SUPPORT
    rxfs_storeUfsPrepare,
    rxfs_storeUfsRead,
    rxfs_storeUfsWrite,
    rxfs_storeStatus,
    rxfs_storePadd,
    rxfs_storeClose,
    rxfs_storeDestroy,
    afs_GenericStoreProc
#else
    .prepare = rxfs_storeUfsPrepare,
    .read = rxfs_storeUfsRead,
    .write = rxfs_storeUfsWrite,
    .status = rxfs_storeStatus,
    .padd = rxfs_storePadd,
    .close = rxfs_storeClose,
    .destroy = rxfs_storeDestroy,
    .storeproc = afs_GenericStoreProc
#endif
};
330 | ||
/* storeOps vtable for a memory cache; chunk data moves via the iovec chain
 * in v->tiov rather than a staging buffer. */
static
struct storeOps rxfs_storeMemOps = {
#ifndef HAVE_STRUCT_LABEL_SUPPORT
    rxfs_storeMemPrepare,
    rxfs_storeMemRead,
    rxfs_storeMemWrite,
    rxfs_storeStatus,
    rxfs_storePadd,
    rxfs_storeClose,
    rxfs_storeDestroy,
    afs_GenericStoreProc
#else
    .prepare = rxfs_storeMemPrepare,
    .read = rxfs_storeMemRead,
    .write = rxfs_storeMemWrite,
    .status = rxfs_storeStatus,
    .padd = rxfs_storePadd,
    .close = rxfs_storeClose,
    .destroy = rxfs_storeDestroy,
    .storeproc = afs_GenericStoreProc
#endif
};
353 | ||
/*!
 * Begin a StoreData RPC and build the rock/ops pair for the transfer.
 *
 * Starts StoreData64 when the 64-bit path is compiled in and the server is
 * not known to lack it; otherwise falls back to the 32-bit StartRXAFS_StoreData
 * (rejecting lengths that do not fit in 32 bits with EFBIG).  Selects the
 * UFS or memory storeOps table based on cacheDiskType and allocates the
 * matching staging resources.
 *
 * \param avc    vcache of the file being stored
 * \param tc     connection to the fileserver (NULL fails with -1)
 * \param rxconn Rx connection to start the call on
 * \param base   starting offset of the store
 * \param bytes  number of bytes to store
 * \param length resulting file length
 * \param sync   AFS_SYNC/AFS_VMSYNC_* flags; AFS_SYNC adds AFS_FSYNC to
 *               the InStatus mask
 * \param ops    out: the storeOps table to drive
 * \param rock   out: the rxfs_storeVariables for the transfer
 * \return 0 on success; on failure the partially-built state is torn down
 *         via rxfs_storeDestroy and its code is returned.
 */
afs_int32
rxfs_storeInit(struct vcache *avc, struct afs_conn *tc,
		struct rx_connection *rxconn, afs_size_t base,
		afs_size_t bytes, afs_size_t length,
		int sync, struct storeOps **ops, void **rock)
{
    afs_int32 code;
    struct rxfs_storeVariables *v;

    if ( !tc )
	return -1;

    v = osi_AllocSmallSpace(sizeof(struct rxfs_storeVariables));
    if (!v)
	osi_Panic("rxfs_storeInit: osi_AllocSmallSpace returned NULL\n");
    memset(v, 0, sizeof(struct rxfs_storeVariables));

    v->InStatus.ClientModTime = avc->f.m.Date;
    v->InStatus.Mask = AFS_SETMODTIME;
    v->vcache = avc;
    if (sync & AFS_SYNC)
	v->InStatus.Mask |= AFS_FSYNC;
    RX_AFS_GUNLOCK();
    v->call = rx_NewCall(rxconn);
    if (v->call) {
#ifdef AFS_64BIT_CLIENT
	if (!afs_serverHasNo64Bit(tc))
	    code = StartRXAFS_StoreData64(
			v->call, (struct AFSFid*)&avc->f.fid.Fid,
			&v->InStatus, base, bytes, length);
	else {
	    /* 32-bit fallback: refuse lengths that cannot be represented */
	    if (length > 0xFFFFFFFF)
		code = EFBIG;
	    else {
		afs_int32 t1 = base, t2 = bytes, t3 = length;
		code = StartRXAFS_StoreData(v->call,
					(struct AFSFid *) &avc->f.fid.Fid,
					&v->InStatus, t1, t2, t3);
	    }
	    v->hasNo64bit = 1;
	}
#else /* AFS_64BIT_CLIENT */
	code = StartRXAFS_StoreData(v->call, (struct AFSFid *)&avc->f.fid.Fid,
				    &v->InStatus, base, bytes, length);
#endif /* AFS_64BIT_CLIENT */
    } else
	code = -1;
    RX_AFS_GLOCK();
    if (code) {
	/* hand v to rxfs_storeDestroy so the call/memory are released */
	*rock = v;
	return rxfs_storeDestroy(rock, code);
    }
    if (cacheDiskType == AFS_FCACHE_TYPE_UFS) {
	/* disk cache: stage through a single large buffer */
	v->tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
	if (!v->tbuffer)
	    osi_Panic
	      ("rxfs_storeInit: osi_AllocLargeSpace for iovecs returned NULL\n");
	*ops = (struct storeOps *) &rxfs_storeUfsOps;
    } else {
	/* memory cache: stage through an Rx-filled iovec chain */
	v->tiov = osi_AllocSmallSpace(sizeof(struct iovec) * RX_MAXIOVECS);
	if (!v->tiov)
	    osi_Panic
	      ("rxfs_storeInit: osi_AllocSmallSpace for iovecs returned NULL\n");
	*ops = (struct storeOps *) &rxfs_storeMemOps;
#ifdef notdef
	/* do this at a higher level now -- it's a parameter */
	/* for now, only do 'continue from close' code if file fits in one
	 * chunk. Could clearly do better: if only one modified chunk
	 * then can still do this. can do this on *last* modified chunk */
	length = avc->f.m.Length - 1;	/* byte position of last byte we'll store */
	if (shouldWake) {
	    if (AFS_CHUNK(length) != 0)
		*shouldWake = 0;
	    else
		*shouldWake = 1;
	}
#endif /* notdef */
    }

    *rock = (void *)v;
    return 0;
}
/* Diagnostic counter: number of store requests that found a NULL dcache
 * entry in the list handed to afs_CacheStoreDCaches(). */
unsigned int storeallmissing = 0;
437 | /*! | |
438 | * Called for each chunk upon store. | |
439 | * | |
440 | * \param avc Ptr to the vcache entry of the file being stored. | |
441 | * \param dclist pointer to the list of dcaches | |
442 | * \param bytes total number of bytes for the current operation | |
443 | * \param anewDV Ptr to the dataversion after store | |
444 | * \param doProcessFS pointer to the "do process FetchStatus" flag | |
445 | * \param OutStatus pointer to the FetchStatus as returned by the fileserver | |
446 | * \param nchunks number of dcaches to consider | |
447 | * \param nomore copy of the "no more data" flag | |
448 | * \param ops pointer to the block of storeOps to be used for this operation | |
449 | * \param rock pointer to the opaque protocol-specific data of this operation | |
450 | */ | |
afs_int32
afs_CacheStoreDCaches(struct vcache *avc, struct dcache **dclist,
		      afs_size_t bytes, afs_hyper_t *anewDV, int *doProcessFS,
		      struct AFSFetchStatus *OutStatus, afs_uint32 nchunks,
		      int nomore, struct storeOps *ops, void *rock)
{
    int *shouldwake = NULL;
    unsigned int i;
    int stored = 0;		/* bytes shipped so far, for asynchrony check */
    afs_int32 code = 0;
    afs_size_t bytesXferred;

#ifndef AFS_NOSTATS
    osi_timeval_t xferStartTime;	/*FS xfer start time */
    afs_size_t bytesToXfer = 10000;	/* # bytes to xfer */
#endif /* AFS_NOSTATS */
    XSTATS_DECLS;
    osi_Assert(nchunks != 0);

    for (i = 0; i < nchunks && !code; i++) {
	struct dcache *tdc = dclist[i];
	afs_int32 size;

	if (!tdc) {
	    /* should not happen; count it and keep going */
	    afs_warn("afs: missing dcache!\n");
	    storeallmissing++;
	    continue;	/* panic? */
	}
	size = tdc->f.chunkBytes;
	afs_Trace4(afs_iclSetp, CM_TRACE_STOREALL2, ICL_TYPE_POINTER, avc,
		   ICL_TYPE_INT32, tdc->f.chunk, ICL_TYPE_INT32, tdc->index,
		   ICL_TYPE_INT32, afs_inode2trace(&tdc->f.inode));
	shouldwake = 0;
	/* On the final RPC of a close, allow the storeproc to wake the
	 * waiting user early once the remaining backlog is within the
	 * configured asynchrony threshold. */
	if (nomore) {
	    if (avc->asynchrony == -1) {
		if (afs_defaultAsynchrony > (bytes - stored))
		    shouldwake = &nomore;
	    }
	    else if ((afs_uint32) avc->asynchrony >= (bytes - stored))
		shouldwake = &nomore;
	}

	afs_Trace4(afs_iclSetp, CM_TRACE_STOREPROC, ICL_TYPE_POINTER, avc,
		   ICL_TYPE_FID, &(avc->f.fid), ICL_TYPE_OFFSET,
		   ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_INT32, size);

	AFS_STATCNT(CacheStoreProc);

	XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
	avc->f.truncPos = AFS_NOTRUNC;
#ifndef AFS_NOSTATS
	/*
	 * In this case, size is *always* the amount of data we'll be trying
	 * to ship here.
	 */
	bytesToXfer = size;

	osi_GetuTime(&xferStartTime);
#endif /* AFS_NOSTATS */
	bytesXferred = 0;

	code = (*ops->storeproc)(ops, rock, tdc, shouldwake,
				 &bytesXferred);

	afs_Trace4(afs_iclSetp, CM_TRACE_STOREPROC, ICL_TYPE_POINTER, avc,
		   ICL_TYPE_FID, &(avc->f.fid), ICL_TYPE_OFFSET,
		   ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_INT32, size);

#ifndef AFS_NOSTATS
	FillStoreStats(code, AFS_STATS_FS_XFERIDX_STOREDATA,
		       xferStartTime, bytesToXfer, bytesXferred);
#endif /* AFS_NOSTATS */

	/* pad short non-final chunks up to the other-cache chunk size so
	 * the next chunk lands on its expected offset */
	if ((tdc->f.chunkBytes < afs_OtherCSize)
	    && (i < (nchunks - 1)) && code == 0) {
	    code = (*ops->padd)(rock, afs_OtherCSize - tdc->f.chunkBytes);
	}
	stored += tdc->f.chunkBytes;
	/* ideally, I'd like to unlock the dcache and turn
	 * off the writing bit here, but that would
	 * require being able to retry StoreAllSegments in
	 * the event of a failure. It only really matters
	 * if user can't read from a 'locked' dcache or
	 * one which has the writing bit turned on. */
    }

    if (!code) {
	code = (*ops->close)(rock, OutStatus, doProcessFS);
	/* if this succeeds, dv has been bumped. */
	if (*doProcessFS) {
	    hadd32(*anewDV, 1);
	}
	XSTATS_END_TIME;
    }
    if (ops)
	code = (*ops->destroy)(&rock, code);

    /* if we errored, can't trust this. */
    if (code)
	*doProcessFS = 0;

    return code;
}
554 | ||
/* Minimum of two values.  NOTE: evaluates each argument more than once,
 * so only pass side-effect-free expressions. */
#define lmin(a,b) (((a) < (b)) ? (a) : (b))
556 | /*! | |
557 | * Called upon store. | |
558 | * | |
559 | * \param dclist pointer to the list of dcaches | |
560 | * \param avc Ptr to the vcache entry. | |
561 | * \param areq Ptr to the request structure | |
562 | * \param sync sync flag | |
563 | * \param minj the chunk offset for this call | |
564 | * \param high index of last dcache to store | |
565 | * \param moredata the moredata flag | |
566 | * \param anewDV Ptr to the dataversion after store | |
567 | * \param amaxStoredLength Ptr to the amount of that is actually stored | |
568 | * | |
569 | * \note Environment: Nothing interesting. | |
570 | */ | |
int
afs_CacheStoreVCache(struct dcache **dcList, struct vcache *avc,
		     struct vrequest *areq, int sync, unsigned int minj,
		     unsigned int high, unsigned int moredata,
		     afs_hyper_t *anewDV, afs_size_t *amaxStoredLength)
{
    afs_int32 code = 0;
    struct storeOps *ops;
    void * rock = NULL;
    unsigned int i, j;

    struct AFSFetchStatus OutStatus;
    int doProcessFS = 0;
    afs_size_t base, bytes, length;
    int nomore;
    unsigned int first = 0;	/* index of first dcache in the current run */
    struct afs_conn *tc;
    struct rx_connection *rxconn;

    /* Walk dcList accumulating runs of contiguous chunks; each run is
     * stored with a single StoreData RPC. */
    for (bytes = 0, j = 0; !code && j <= high; j++) {
	if (dcList[j]) {
	    ObtainSharedLock(&(dcList[j]->lock), 629);
	    if (!bytes)
		first = j;
	    bytes += dcList[j]->f.chunkBytes;
	    /* account for the zero padding a short interior chunk needs */
	    if ((dcList[j]->f.chunkBytes < afs_OtherCSize)
		&& (dcList[j]->f.chunk - minj < high)
		&& dcList[j + 1]) {
		int sbytes = afs_OtherCSize - dcList[j]->f.chunkBytes;
		bytes += sbytes;
	    }
	}
	/* a run ends at the list end or at a gap (NULL entry) */
	if (bytes && (j == high || !dcList[j + 1])) {
	    afs_uint32 nchunks;
	    struct dcache **dclist = &dcList[first];
	    /* base = AFS_CHUNKTOBASE(dcList[first]->f.chunk); */
	    base = AFS_CHUNKTOBASE(first + minj);
	    /*
	     *
	     * take a list of dcache structs and send them all off to the server
	     * the list must be in order, and the chunks contiguous.
	     * Note - there is no locking done by this code currently.  For
	     * safety's sake, xdcache could be locked over the entire call.
	     * However, that pretty well ties up all the threads.  Meantime, all
	     * the chunks _MUST_ have their refcounts bumped.
	     * The writes done before a store back will clear setuid-ness
	     * in cache file.
	     * We can permit CacheStoreProc to wake up the user process IFF we
	     * are doing the last RPC for this close, ie, storing back the last
	     * set of contiguous chunks of a file.
	     */

	    nchunks = 1 + j - first;
	    nomore = !(moredata || (j != high));
	    length = lmin(avc->f.m.Length, avc->f.truncPos);
	    afs_Trace4(afs_iclSetp, CM_TRACE_STOREDATA64,
		       ICL_TYPE_FID, &avc->f.fid.Fid, ICL_TYPE_OFFSET,
		       ICL_HANDLE_OFFSET(base), ICL_TYPE_OFFSET,
		       ICL_HANDLE_OFFSET(bytes), ICL_TYPE_OFFSET,
		       ICL_HANDLE_OFFSET(length));

	    /* standard retry loop: afs_Analyze decides whether to retry
	     * on another connection */
	    do {
		tc = afs_Conn(&avc->f.fid, areq, 0, &rxconn);

#ifdef AFS_64BIT_CLIENT
	      restart:
#endif
		code = rxfs_storeInit(avc, tc, rxconn, base, bytes, length,
				      sync, &ops, &rock);
		if ( !code ) {
		    code = afs_CacheStoreDCaches(avc, dclist, bytes, anewDV,
						 &doProcessFS, &OutStatus,
						 nchunks, nomore, ops, rock);
		}

#ifdef AFS_64BIT_CLIENT
		/* server rejected the 64-bit RPC: remember and redo 32-bit */
		if (code == RXGEN_OPCODE && !afs_serverHasNo64Bit(tc)) {
		    afs_serverSetNo64Bit(tc);
		    goto restart;
		}
#endif /* AFS_64BIT_CLIENT */
	    } while (afs_Analyze
		     (tc, rxconn, code, &avc->f.fid, areq,
		      AFS_STATS_FS_RPCIDX_STOREDATA, SHARED_LOCK,
		      NULL));

	    /* put back all remaining locked dcache entries */
	    for (i = 0; i < nchunks; i++) {
		struct dcache *tdc = dclist[i];
		if (!code) {
		    if (afs_indexFlags[tdc->index] & IFDataMod) {
			/*
			 * LOCKXXX -- should hold afs_xdcache(W) when
			 * modifying afs_indexFlags.
			 */
			afs_indexFlags[tdc->index] &= ~IFDataMod;
			afs_stats_cmperf.cacheCurrDirtyChunks--;
			afs_indexFlags[tdc->index] &= ~IFDirtyPages;
			if (sync & AFS_VMSYNC_INVAL) {
			    /* since we have invalidated all the pages of this
			     ** vnode by calling osi_VM_TryToSmush, we can
			     ** safely mark this dcache entry as not having
			     ** any pages. This vnode now becomes eligible for
			     ** reclamation by getDownD.
			     */
			    afs_indexFlags[tdc->index] &= ~IFAnyPages;
			}
		    }
		}
		UpgradeSToWLock(&tdc->lock, 628);
		tdc->f.states &= ~DWriting;	/* correct? */
		tdc->dflags |= DFEntryMod;
		ReleaseWriteLock(&tdc->lock);
		afs_PutDCache(tdc);
		/* Mark the entry as released */
		dclist[i] = NULL;
	    }

	    if (doProcessFS) {
		/* Now copy out return params */
		UpgradeSToWLock(&avc->lock, 28);	/* keep out others for a while */
		afs_ProcessFS(avc, &OutStatus, areq);
		/* Keep last (max) size of file on server to see if
		 * we need to call afs_StoreMini to extend the file.
		 */
		if (!moredata)
		    *amaxStoredLength = OutStatus.Length;
		ConvertWToSLock(&avc->lock);
		doProcessFS = 0;
	    }

	    if (code) {
		/* on error, drop the locks/refs taken for the chunks we
		 * never got to */
		for (j++; j <= high; j++) {
		    if (dcList[j]) {
			ReleaseSharedLock(&(dcList[j]->lock));
			afs_PutDCache(dcList[j]);
			/* Releasing entry */
			dcList[j] = NULL;
		    }
		}
	    }

	    afs_Trace2(afs_iclSetp, CM_TRACE_STOREALLDCDONE,
		       ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code);
	    bytes = 0;	/* start accumulating the next run */
	}
    }

    return code;
}
721 | ||
722 | /* rock and operations for RX_FILESERVER */ | |
723 | ||
/* Per-operation state for a FetchData RPC. */
struct rxfs_fetchVariables {
    struct rx_call *call;	/* active Rx call, NULL once ended */
    char *tbuffer;		/* staging buffer (UFS cache path) */
    struct iovec *iov;		/* iovec chain (memory cache path) */
    afs_int32 nio;		/* number of iovecs filled by rx_Readv */
    afs_int32 hasNo64bit;	/* set once the server rejects 64-bit RPCs */
    afs_int32 iovno;		/* current iovec index */
    afs_int32 iovmax;		/* last valid iovec index */
};
733 | ||
734 | afs_int32 | |
735 | rxfs_fetchUfsRead(void *r, afs_uint32 size, afs_uint32 *bytesread) | |
736 | { | |
737 | afs_int32 code; | |
738 | afs_uint32 tlen; | |
739 | struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r; | |
740 | ||
741 | *bytesread = 0; | |
742 | tlen = (size > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : size); | |
743 | RX_AFS_GUNLOCK(); | |
744 | code = rx_Read(v->call, v->tbuffer, tlen); | |
745 | RX_AFS_GLOCK(); | |
746 | if (code <= 0) | |
747 | return -34; | |
748 | *bytesread = code; | |
749 | return 0; | |
750 | } | |
751 | ||
752 | afs_int32 | |
753 | rxfs_fetchMemRead(void *r, afs_uint32 tlen, afs_uint32 *bytesread) | |
754 | { | |
755 | afs_int32 code; | |
756 | struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r; | |
757 | ||
758 | *bytesread = 0; | |
759 | RX_AFS_GUNLOCK(); | |
760 | code = rx_Readv(v->call, v->iov, &v->nio, RX_MAXIOVECS, tlen); | |
761 | RX_AFS_GLOCK(); | |
762 | if (code <= 0) | |
763 | return -34; | |
764 | *bytesread = code; | |
765 | return 0; | |
766 | } | |
767 | ||
768 | ||
769 | afs_int32 | |
770 | rxfs_fetchMemWrite(void *r, struct osi_file *fP, afs_uint32 offset, | |
771 | afs_uint32 tlen, afs_uint32 *byteswritten) | |
772 | { | |
773 | afs_int32 code; | |
774 | struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r; | |
775 | struct memCacheEntry *mceP = (struct memCacheEntry *)fP; | |
776 | ||
777 | code = afs_MemWritevBlk(mceP, offset, v->iov, v->nio, tlen); | |
778 | if (code != tlen) { | |
779 | return EIO; | |
780 | } | |
781 | *byteswritten = code; | |
782 | return 0; | |
783 | } | |
784 | ||
785 | afs_int32 | |
786 | rxfs_fetchUfsWrite(void *r, struct osi_file *fP, afs_uint32 offset, | |
787 | afs_uint32 tlen, afs_uint32 *byteswritten) | |
788 | { | |
789 | afs_int32 code; | |
790 | struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r; | |
791 | ||
792 | code = afs_osi_Write(fP, -1, v->tbuffer, tlen); | |
793 | if (code != tlen) { | |
794 | return EIO; | |
795 | } | |
796 | *byteswritten = code; | |
797 | return 0; | |
798 | } | |
799 | ||
800 | ||
/*!
 * Finish the FetchData RPC: collect status/callback/volsync output and end
 * the Rx call, folding any EndRXAFS_* error into the rx_EndCall result.
 * The call pointer is cleared so rxfs_fetchDestroy will not end it twice.
 * Returns -1 if there is no active call.
 */
afs_int32
rxfs_fetchClose(void *r, struct vcache *avc, struct dcache * adc,
		struct afs_FetchOutput *o)
{
    afs_int32 code;
    struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r;

    if (!v->call)
	return -1;

    RX_AFS_GUNLOCK();
#ifdef AFS_64BIT_CLIENT
    if (!v->hasNo64bit)
	code = EndRXAFS_FetchData64(v->call, &o->OutStatus, &o->CallBack,
				    &o->tsync);
    else
#endif
	code = EndRXAFS_FetchData(v->call, &o->OutStatus, &o->CallBack,
				  &o->tsync);
    code = rx_EndCall(v->call, code);
    RX_AFS_GLOCK();

    v->call = NULL;

    return code;
}
827 | ||
828 | afs_int32 | |
829 | rxfs_fetchDestroy(void **r, afs_int32 code) | |
830 | { | |
831 | struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)*r; | |
832 | ||
833 | *r = NULL; | |
834 | if (v->call) { | |
835 | RX_AFS_GUNLOCK(); | |
836 | code = rx_EndCall(v->call, code); | |
837 | RX_AFS_GLOCK(); | |
838 | } | |
839 | if (v->tbuffer) | |
840 | osi_FreeLargeSpace(v->tbuffer); | |
841 | if (v->iov) | |
842 | osi_FreeSmallSpace(v->iov); | |
843 | osi_FreeSmallSpace(v); | |
844 | return code; | |
845 | } | |
846 | ||
/*!
 * Read the next block-length word of an extended (multi-block) fetch.
 * On entry *moredata says whether another length word is expected; on
 * exit *length holds the next block's size and *moredata its top bit
 * (more blocks follow).
 */
afs_int32
rxfs_fetchMore(void *r, afs_int32 *length, afs_uint32 *moredata)
{
    afs_int32 code;
    struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r;

    /*
     * The fetch protocol is extended for the AFS/DFS translator
     * to allow multiple blocks of data, each with its own length,
     * to be returned. As long as the top bit is set, there are more
     * blocks expected.
     *
     * We do not do this for AFS file servers because they sometimes
     * return large negative numbers as the transfer size.
     */
    if (*moredata) {
	RX_AFS_GUNLOCK();
	code = rx_Read(v->call, (void *)length, sizeof(afs_int32));
	RX_AFS_GLOCK();
	*length = ntohl(*length);
	if (code != sizeof(afs_int32)) {
	    code = rx_Error(v->call);
	    *moredata = 0;
	    return (code ? code : -1);	/* try to return code, not -1 */
	}
    }
    /* top bit of the length word flags a following block */
    *moredata = *length & 0x80000000;
    *length &= ~0x80000000;
    return 0;
}
877 | ||
/* fetchOps vtable for a UFS (on-disk) cache; data is staged through
 * v->tbuffer. */
static
struct fetchOps rxfs_fetchUfsOps = {
    rxfs_fetchMore,
    rxfs_fetchUfsRead,
    rxfs_fetchUfsWrite,
    rxfs_fetchClose,
    rxfs_fetchDestroy
};
886 | ||
/* fetchOps vtable for a memory cache; data moves via the iovec chain
 * in v->iov. */
static
struct fetchOps rxfs_fetchMemOps = {
    rxfs_fetchMore,
    rxfs_fetchMemRead,
    rxfs_fetchMemWrite,
    rxfs_fetchClose,
    rxfs_fetchDestroy
};
895 | ||
/*!
 * Begin a FetchData RPC and build the per-fetch state used by
 * afs_CacheFetchProc.
 *
 * Starts an Rx call on \a rxconn, issues StartRXAFS_FetchData64 (falling
 * back to the 32-bit StartRXAFS_FetchData when the server lacks 64-bit
 * support), reads the initial length word(s) from the stream, validates
 * them, and selects the UFS- or memory-backed fetchOps table based on
 * cacheDiskType.
 *
 * \param tc      AFS connection; consulted/updated for 64-bit capability.
 * \param rxconn  Rx connection on which the call is created.
 * \param avc     vcache of the file being fetched (supplies the FID).
 * \param base    Starting offset of the fetch.
 * \param size    Number of bytes requested.
 * \param alength Out: length the server says it will send (set only on
 *                success paths; may legitimately end up 0).
 * \param adc     dcache entry; must be write-locked (asserted for UFS).
 * \param fP      Cache file handle; offset reset to 0 for UFS fetches.
 * \param ops     Out: fetchOps table to drive the transfer.
 * \param rock    Out: opaque rxfs_fetchVariables; owned by the caller and
 *                released via (*ops)->destroy.
 *
 * \return 0 on success; an error code otherwise (state is torn down via
 *         rxfs_fetchDestroy on failure, so the caller gets no rock/ops).
 */
afs_int32
rxfs_fetchInit(struct afs_conn *tc, struct rx_connection *rxconn,
               struct vcache *avc, afs_offs_t base,
               afs_uint32 size, afs_int32 *alength, struct dcache *adc,
               struct osi_file *fP, struct fetchOps **ops, void **rock)
{
    struct rxfs_fetchVariables *v;
    int code = 0;
#ifdef AFS_64BIT_CLIENT
    afs_uint32 length_hi = 0;	/* high 32 bits of the server's length */
#endif
    afs_uint32 length = 0, bytes;

    /* Allocation failure is fatal here; small-space pool should not run dry. */
    v = (struct rxfs_fetchVariables *)
	osi_AllocSmallSpace(sizeof(struct rxfs_fetchVariables));
    if (!v)
	osi_Panic("rxfs_fetchInit: osi_AllocSmallSpace returned NULL\n");
    memset(v, 0, sizeof(struct rxfs_fetchVariables));

    /* Rx calls must be made without the AFS global lock held. */
    RX_AFS_GUNLOCK();
    v->call = rx_NewCall(rxconn);
    RX_AFS_GLOCK();
    if (v->call) {
#ifdef AFS_64BIT_CLIENT
	afs_size_t length64;	/* as returned from server */
	/* Try the 64-bit RPC first unless this server is already known to
	 * lack it. */
	if (!afs_serverHasNo64Bit(tc)) {
	    afs_uint64 llbytes = size;
	    RX_AFS_GUNLOCK();
	    code = StartRXAFS_FetchData64(v->call,
					  (struct AFSFid *) &avc->f.fid.Fid,
					  base, llbytes);
	    if (code != 0) {
		RX_AFS_GLOCK();
		afs_Trace2(afs_iclSetp, CM_TRACE_FETCH64CODE,
			   ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code);
	    } else {
		/* First word on the wire is the high half of the 64-bit
		 * length, in network byte order. */
		bytes = rx_Read(v->call, (char *)&length_hi, sizeof(afs_int32));
		RX_AFS_GLOCK();
		if (bytes == sizeof(afs_int32)) {
		    length_hi = ntohl(length_hi);
		} else {
		    /* Short read: abort the call; NULL v->call so the
		     * fallback path below can start a fresh one. */
		    RX_AFS_GUNLOCK();
		    code = rx_EndCall(v->call, RX_PROTOCOL_ERROR);
		    RX_AFS_GLOCK();
		    v->call = NULL;
		}
	    }
	}
	/* Fall back to the 32-bit RPC if the server rejected the opcode or
	 * was already marked as 32-bit only. */
	if (code == RXGEN_OPCODE || afs_serverHasNo64Bit(tc)) {
	    if (base > 0x7FFFFFFF) {
		/* Offset doesn't fit in a signed 32-bit RPC argument. */
		code = EFBIG;
	    } else {
		afs_uint32 pos;
		pos = base;
		RX_AFS_GUNLOCK();
		if (!v->call)
		    v->call = rx_NewCall(rxconn);
		code =
		    StartRXAFS_FetchData(
			v->call, (struct AFSFid*)&avc->f.fid.Fid,
			pos, size);
		RX_AFS_GLOCK();
	    }
	    /* Remember this server is 32-bit so later fetches skip the
	     * 64-bit attempt. */
	    afs_serverSetNo64Bit(tc);
	    v->hasNo64bit = 1;
	}
	if (!code) {
	    /* Read the (low) 32 bits of the length from the stream. */
	    RX_AFS_GUNLOCK();
	    bytes = rx_Read(v->call, (char *)&length, sizeof(afs_int32));
	    RX_AFS_GLOCK();
	    if (bytes == sizeof(afs_int32))
		length = ntohl(length);
	    else {
		RX_AFS_GUNLOCK();
		code = rx_EndCall(v->call, RX_PROTOCOL_ERROR);
		v->call = NULL;
		length = 0;
		RX_AFS_GLOCK();
	    }
	}
	/* Combine high and low halves (length_hi stays 0 on 32-bit path). */
	FillInt64(length64, length_hi, length);

	if (!code) {
	    /* Check if the fileserver said our length is bigger than can fit
	     * in a signed 32-bit integer. If it is, we can't handle that, so
	     * error out. */
	    if (length64 > MAX_AFS_INT32) {
		static int warned;	/* warn only once per module lifetime */
		if (!warned) {
		    warned = 1;
		    afs_warn("afs: Warning: FetchData64 returned too much data "
			     "(length64 %u.%u); this should not happen! "
			     "Aborting fetch request.\n",
			     length_hi, length);
		}
		RX_AFS_GUNLOCK();
		code = rx_EndCall(v->call, RX_PROTOCOL_ERROR);
		v->call = NULL;
		length = 0;
		RX_AFS_GLOCK();
		code = code != 0 ? code : EIO;
	    }
	}

	if (!code) {
	    /* Check if the fileserver said our length was negative. If it
	     * is, just treat it as a 0 length, since some older fileservers
	     * returned negative numbers when they meant to return 0. Note
	     * that we must do this in this 64-bit-specific block, since
	     * length64 being negative will screw up our conversion to the
	     * 32-bit 'alength' below. */
	    if (length64 < 0) {
		length_hi = length = 0;
		FillInt64(length64, 0, 0);
	    }
	}

	afs_Trace3(afs_iclSetp, CM_TRACE_FETCH64LENG,
		   ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code,
		   ICL_TYPE_OFFSET,
		   ICL_HANDLE_OFFSET(length64));
	if (!code)
	    *alength = length;
#else /* AFS_64BIT_CLIENT */
	/* 32-bit-only client: single RPC, single length word. */
	RX_AFS_GUNLOCK();
	code = StartRXAFS_FetchData(v->call, (struct AFSFid *)&avc->f.fid.Fid,
				    base, size);
	RX_AFS_GLOCK();
	if (code == 0) {
	    RX_AFS_GUNLOCK();
	    bytes =
		rx_Read(v->call, (char *)&length, sizeof(afs_int32));
	    RX_AFS_GLOCK();
	    if (bytes == sizeof(afs_int32)) {
		*alength = ntohl(length);
		if (*alength < 0) {
		    /* Older fileservers can return a negative length when they
		     * meant to return 0; just assume negative lengths were
		     * meant to be 0 lengths. */
		    *alength = 0;
		}
	    } else {
		code = rx_EndCall(v->call, RX_PROTOCOL_ERROR);
		v->call = NULL;
	    }
	}
#endif /* AFS_64BIT_CLIENT */
    } else
	code = -1;	/* rx_NewCall failed; no call to tear down */

    /* We need to cast here, in order to avoid issues if *alength is
     * negative. Some, older, fileservers can return a negative length,
     * which the rest of the code deals correctly with. */
    if (code == 0 && *alength > (afs_int32) size) {
	/* The fileserver told us it is going to send more data than we
	 * requested. It shouldn't do that, and accepting that much data
	 * can make us take up more cache space than we're supposed to,
	 * so error. */
	static int warned;
	if (!warned) {
	    warned = 1;
	    afs_warn("afs: Warning: FetchData64 returned more data than "
		     "requested (requested %ld, got %ld); this should not "
		     "happen! Aborting fetch request.\n",
		     (long)size, (long)*alength);
	}
	RX_AFS_GUNLOCK();
	code = rx_EndCall(v->call, RX_PROTOCOL_ERROR);
	RX_AFS_GLOCK();
	v->call = NULL;
	code = EIO;
    }

    if (code) {
	/* Hand v to rxfs_fetchDestroy via *rock so it frees everything and
	 * finishes the Rx call; its return value becomes ours. */
	*rock = v;
	return rxfs_fetchDestroy(rock, code);
    }
    if (cacheDiskType == AFS_FCACHE_TYPE_UFS) {
	v->tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
	if (!v->tbuffer)
	    osi_Panic("rxfs_fetchInit: osi_AllocLargeSpace for iovecs returned NULL\n");
	osi_Assert(WriteLocked(&adc->lock));
	fP->offset = 0;
	*ops = (struct fetchOps *) &rxfs_fetchUfsOps;
    }
    else {
	afs_Trace4(afs_iclSetp, CM_TRACE_MEMFETCH, ICL_TYPE_POINTER, avc,
		   ICL_TYPE_POINTER, fP, ICL_TYPE_OFFSET,
		   ICL_HANDLE_OFFSET(base), ICL_TYPE_INT32, length);
	/*
	 * We need to alloc the iovecs on the heap so that they are "pinned"
	 * rather than declare them on the stack - defect 11272
	 */
	v->iov = osi_AllocSmallSpace(sizeof(struct iovec) * RX_MAXIOVECS);
	if (!v->iov)
	    osi_Panic("rxfs_fetchInit: osi_AllocSmallSpace for iovecs returned NULL\n");
	*ops = (struct fetchOps *) &rxfs_fetchMemOps;
    }
    *rock = (void *)v;
    return 0;
}
1097 | ||
1098 | ||
1099 | /*! | |
1100 | * Routine called on fetch; also tells people waiting for data | |
1101 | * that more has arrived. | |
1102 | * | |
1103 | * \param tc Ptr to the AFS connection structure. | |
1104 | * \param rxconn Ptr to the Rx connection structure. | |
1105 | * \param fP File descriptor for the cache file. | |
1106 | * \param base Base offset to fetch. | |
1107 | * \param adc Ptr to the dcache entry for the file, write-locked. | |
1108 | * \param avc Ptr to the vcache entry for the file. | |
1109 | * \param size Amount of data that should be fetched. | |
1110 | * \param tsmall Ptr to the afs_FetchOutput structure. | |
1111 | * | |
1112 | * \note Environment: Nothing interesting. | |
1113 | */ | |
int
afs_CacheFetchProc(struct afs_conn *tc, struct rx_connection *rxconn,
		   struct osi_file *fP, afs_size_t base,
		   struct dcache *adc, struct vcache *avc, afs_int32 size,
		   struct afs_FetchOutput *tsmall)
{
    afs_int32 code;
    afs_int32 length;		/* bytes remaining in the current segment */
    afs_uint32 bytesread, byteswritten;
    struct fetchOps *ops = NULL;	/* UFS or memory fetch table */
    void *rock = NULL;		/* per-fetch state from rxfs_fetchInit */
    afs_uint32 moredata = 0;	/* server flag: another segment follows */
    int offset = 0;		/* write offset within the cache chunk */

    XSTATS_DECLS;
#ifndef AFS_NOSTATS
    osi_timeval_t xferStartTime;	/*FS xfer start time */
    afs_size_t bytesToXfer = 0, bytesXferred = 0;
#endif

    AFS_STATCNT(CacheFetchProc);

    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHDATA);

    /*
     * Locks held:
     * avc->lock(R) if setLocks && !slowPass
     * avc->lock(W) if !setLocks || slowPass
     * adc->lock(W)
     */
    /* Start the RPC; on success, ops/rock drive the transfer below. */
    code = rxfs_fetchInit(
		tc, rxconn, avc, base, size, &length, adc, fP, &ops, &rock);

#ifndef AFS_NOSTATS
    osi_GetuTime(&xferStartTime);
#endif /* AFS_NOSTATS */

    /* Nothing valid beyond 'base' yet; readers wait on adc->validPos. */
    adc->validPos = base;

    if (code) {
	goto done;
    }

    do {
	/* For CForeign (non-AFS-protocol, e.g. DFS) servers, each segment
	 * is preceded by its own length word and a more-data flag. */
	if (avc->f.states & CForeign) {
	    code = (*ops->more)(rock, &length, &moredata);
	    if ( code )
		goto done;
	}
#ifndef AFS_NOSTATS
	bytesToXfer += length;
#endif /* AFS_NOSTATS */
	/* Pull the segment off the wire and into the cache file. */
	while (length > 0) {
#ifdef RX_KERNEL_TRACE
	    afs_Trace1(afs_iclSetp, CM_TRACE_TIMESTAMP, ICL_TYPE_STRING,
		       "before rx_Read");
#endif
	    code = (*ops->read)(rock, length, &bytesread);
#ifdef RX_KERNEL_TRACE
	    afs_Trace1(afs_iclSetp, CM_TRACE_TIMESTAMP, ICL_TYPE_STRING,
		       "after rx_Read");
#endif
#ifndef AFS_NOSTATS
	    bytesXferred += bytesread;
#endif /* AFS_NOSTATS */
	    if ( code ) {
		afs_Trace3(afs_iclSetp, CM_TRACE_FETCH64READ,
			   ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code,
			   ICL_TYPE_INT32, length);
		code = -34;	/* historical error code for a failed read */
		goto done;
	    }
	    /* Persist what we just read before advancing our position. */
	    code = (*ops->write)(rock, fP, offset, bytesread, &byteswritten);
	    if ( code )
		goto done;
	    offset += bytesread;
	    base += bytesread;
	    length -= bytesread;
	    /* Publish progress and wake any readers sleeping on validPos
	     * (trace only when nobody was actually waiting). */
	    adc->validPos = base;
	    if (afs_osi_Wakeup(&adc->validPos) == 0)
		afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAKE, ICL_TYPE_STRING,
			   __FILE__, ICL_TYPE_INT32, __LINE__,
			   ICL_TYPE_POINTER, adc, ICL_TYPE_INT32,
			   adc->dflags);
	}
	code = 0;
    } while (moredata);
  done:
    /* close finishes the RPC and fills *tsmall; destroy frees rock and
     * reports the final call status (only if init gave us an ops table). */
    if (!code)
	code = (*ops->close)(rock, avc, adc, tsmall);
    if (ops)
	code = (*ops->destroy)(&rock, code);

#ifndef AFS_NOSTATS
    FillStoreStats(code, AFS_STATS_FS_XFERIDX_FETCHDATA, xferStartTime,
		   bytesToXfer, bytesXferred);
#endif
    XSTATS_END_TIME;
    return code;
}