Commit | Line | Data |
---|---|---|
b9c5136f | 1 | /* Caching facts about regions of the buffer, for optimization. |
429ab54e | 2 | Copyright (C) 1985, 1986, 1987, 1988, 1989, 1993, 1995, 2001, 2002, 2003, |
8cabe764 | 3 | 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. |
b9c5136f KH |
4 | |
5 | This file is part of GNU Emacs. | |
6 | ||
9ec0b715 | 7 | GNU Emacs is free software: you can redistribute it and/or modify |
b9c5136f | 8 | it under the terms of the GNU General Public License as published by |
9ec0b715 GM |
9 | the Free Software Foundation, either version 3 of the License, or |
10 | (at your option) any later version. | |
b9c5136f KH |
11 | |
12 | GNU Emacs is distributed in the hope that it will be useful, | |
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | GNU General Public License for more details. | |
16 | ||
17 | You should have received a copy of the GNU General Public License | |
9ec0b715 | 18 | along with GNU Emacs. If not, see <http://www.gnu.org/licenses/>. */ |
b9c5136f KH |
19 | |
20 | ||
21 | #include <config.h> | |
5890e9f7 JD |
22 | #include <stdio.h> |
23 | ||
b9c5136f KH |
24 | #include "lisp.h" |
25 | #include "buffer.h" | |
26 | #include "region-cache.h" | |
27 | ||
b9c5136f KH |
28 | \f |
29 | /* Data structures. */ | |
30 | ||
31 | /* The region cache. | |
32 | ||
33 | We want something that maps character positions in a buffer onto | |
34 | values. The representation should deal well with long runs of | |
35 | characters with the same value. | |
36 | ||
37 | The tricky part: the representation should be very cheap to | |
38 | maintain in the presence of many insertions and deletions. If the | |
39 | overhead of maintaining the cache is too high, the speedups it | |
40 | offers will be worthless. | |
41 | ||
42 | ||
43 | We represent the region cache as a sorted array of struct | |
44 | boundary's, each of which contains a buffer position and a value; | |
45 | the value applies to all the characters after the buffer position, | |
46 | until the position of the next boundary, or the end of the buffer. | |
47 | ||
48 | The cache always has a boundary whose position is BUF_BEG, so | |
49 | there's always a value associated with every character in the | |
50 | buffer. Since the cache is sorted, this is always the first | |
51 | element of the cache. | |
52 | ||
53 | To facilitate the insertion and deletion of boundaries in the | |
54 | cache, the cache has a gap, just like Emacs's text buffers do. | |
55 | ||
56 | To help boundary positions float along with insertions and | |
57 | deletions, all boundary positions before the cache gap are stored | |
58 | relative to BUF_BEG (buf) (thus they're >= 0), and all boundary | |
59 | positions after the gap are stored relative to BUF_Z (buf) (thus | |
60 | they're <= 0). Look at BOUNDARY_POS to see this in action. See | |
61 | revalidate_region_cache to see how this helps. */ | |
62 | ||
struct boundary {
  /* Position of this boundary.  Boundaries before the cache gap
     store POS relative to buffer_beg (so POS >= 0); boundaries after
     the gap store it relative to buffer_end (so POS <= 0).  See
     BOUNDARY_POS.  */
  int pos;
  /* The value in effect for every character from this boundary up to
     (but not including) the next boundary, or the buffer end.  */
  int value;
};

struct region_cache {
  /* A sorted array of locations where the known-ness of the buffer
     changes.  */
  struct boundary *boundaries;

  /* boundaries[gap_start ... gap_start + gap_len - 1] is the gap.  */
  int gap_start, gap_len;

  /* The number of elements allocated to boundaries, not including the
     gap.  */
  int cache_len;

  /* The areas that haven't changed since the last time we cleaned out
     invalid entries from the cache.  These overlap when the buffer is
     entirely unchanged.  */
  int beg_unchanged, end_unchanged;

  /* The first and last positions in the buffer.  Because boundaries
     store their positions relative to the start (BEG) and end (Z) of
     the buffer, knowing these positions allows us to accurately
     interpret positions without having to pass the buffer structure
     or its endpoints around all the time.

     Yes, buffer_beg is always 1.  It's there for symmetry with
     buffer_end and the BEG and BUF_BEG macros.  */
  int buffer_beg, buffer_end;
};

/* Return the position of boundary i in cache c.  Entries before the
   gap are beg-relative; entries after it are end-relative and live
   gap_len slots further along in the array.  */
#define BOUNDARY_POS(c, i) \
  ((i) < (c)->gap_start \
   ? (c)->buffer_beg + (c)->boundaries[(i)].pos \
   : (c)->buffer_end + (c)->boundaries[(c)->gap_len + (i)].pos)

/* Return the value for text after boundary i in cache c.  */
#define BOUNDARY_VALUE(c, i) \
  ((i) < (c)->gap_start \
   ? (c)->boundaries[(i)].value \
   : (c)->boundaries[(c)->gap_len + (i)].value)

/* Set the value for text after boundary i in cache c to v.  */
#define SET_BOUNDARY_VALUE(c, i, v) \
  ((i) < (c)->gap_start \
   ? ((c)->boundaries[(i)].value = (v))\
   : ((c)->boundaries[(c)->gap_len + (i)].value = (v)))


/* How many elements to add to the gap when we resize the buffer.  */
#define NEW_CACHE_GAP (40)

/* See invalidate_region_cache; if an invalidation would throw away
   information about this many characters, call
   revalidate_region_cache before doing the new invalidation, to
   preserve that information, instead of throwing it away.  */
#define PRESERVE_THRESHOLD (500)

/* Forward declaration; the definition appears below.  (Old-style
   declaration: the parameter list is intentionally unspecified.)  */
static void revalidate_region_cache ();
125 | ||
126 | \f | |
127 | /* Interface: Allocating, initializing, and disposing of region caches. */ | |
128 | ||
129 | struct region_cache * | |
130 | new_region_cache () | |
131 | { | |
177c0ea7 | 132 | struct region_cache *c |
b9c5136f KH |
133 | = (struct region_cache *) xmalloc (sizeof (struct region_cache)); |
134 | ||
135 | c->gap_start = 0; | |
136 | c->gap_len = NEW_CACHE_GAP; | |
137 | c->cache_len = 0; | |
138 | c->boundaries = | |
139 | (struct boundary *) xmalloc ((c->gap_len + c->cache_len) | |
140 | * sizeof (*c->boundaries)); | |
141 | ||
142 | c->beg_unchanged = 0; | |
143 | c->end_unchanged = 0; | |
b0ead4a8 SM |
144 | c->buffer_beg = BEG; |
145 | c->buffer_end = BEG; | |
b9c5136f KH |
146 | |
147 | /* Insert the boundary for the buffer start. */ | |
148 | c->cache_len++; | |
149 | c->gap_len--; | |
150 | c->gap_start++; | |
151 | c->boundaries[0].pos = 0; /* from buffer_beg */ | |
152 | c->boundaries[0].value = 0; | |
153 | ||
154 | return c; | |
155 | } | |
156 | ||
/* Release all storage owned by cache C, then C itself.  C must have
   come from new_region_cache; the caller must not use C afterward.  */
void
free_region_cache (c)
     struct region_cache *c;
{
  xfree (c->boundaries);
  xfree (c);
}
164 | ||
165 | \f | |
166 | /* Finding positions in the cache. */ | |
167 | ||
168 | /* Return the index of the last boundary in cache C at or before POS. | |
169 | In other words, return the boundary that specifies the value for | |
170 | the region POS..(POS + 1). | |
171 | ||
172 | This operation should be logarithmic in the number of cache | |
173 | entries. It would be nice if it took advantage of locality of | |
174 | reference, too, by searching entries near the last entry found. */ | |
175 | static int | |
176 | find_cache_boundary (c, pos) | |
177 | struct region_cache *c; | |
178 | int pos; | |
179 | { | |
180 | int low = 0, high = c->cache_len; | |
181 | ||
182 | while (low + 1 < high) | |
183 | { | |
184 | /* mid is always a valid index, because low < high and ">> 1" | |
185 | rounds down. */ | |
186 | int mid = (low + high) >> 1; | |
187 | int boundary = BOUNDARY_POS (c, mid); | |
188 | ||
189 | if (pos < boundary) | |
190 | high = mid; | |
191 | else | |
192 | low = mid; | |
193 | } | |
194 | ||
195 | /* Some testing. */ | |
196 | if (BOUNDARY_POS (c, low) > pos | |
197 | || (low + 1 < c->cache_len | |
198 | && BOUNDARY_POS (c, low + 1) <= pos)) | |
199 | abort (); | |
200 | ||
201 | return low; | |
202 | } | |
203 | ||
204 | ||
205 | \f | |
206 | /* Moving the cache gap around, inserting, and deleting. */ | |
207 | ||
208 | ||
/* Move the gap of cache C to index POS, and make sure it has space
   for at least MIN_SIZE boundaries.  Entries that cross the gap are
   re-based between beg-relative and end-relative form as they move;
   see the comment on BOUNDARY_POS.  POS must be in 1..cache_len.  */
static void
move_cache_gap (c, pos, min_size)
     struct region_cache *c;
     int pos;
     int min_size;
{
  /* Copy these out of the cache and into registers.  */
  int gap_start = c->gap_start;
  int gap_len = c->gap_len;
  int buffer_beg = c->buffer_beg;
  int buffer_end = c->buffer_end;

  if (pos < 0
      || pos > c->cache_len)
    abort ();

  /* We mustn't ever try to put the gap before the dummy start
     boundary.  That must always be start-relative.  */
  if (pos == 0)
    abort ();

  /* Need we move the gap right?  */
  while (gap_start < pos)
    {
      /* Copy one boundary from after to before the gap, and
	 convert its position to start-relative.  */
      c->boundaries[gap_start].pos
	= (buffer_end
	   + c->boundaries[gap_start + gap_len].pos
	   - buffer_beg);
      c->boundaries[gap_start].value
	= c->boundaries[gap_start + gap_len].value;
      gap_start++;
    }

  /* To enlarge the gap, we need to re-allocate the boundary array, and
     then shift the area after the gap to the new end.  Since the cost
     is proportional to the amount of stuff after the gap, we do the
     enlargement here, after a right shift but before a left shift,
     when the portion after the gap is smallest.  */
  if (gap_len < min_size)
    {
      int i;

      /* Always make at least NEW_CACHE_GAP elements, as long as we're
	 expanding anyway.  */
      if (min_size < NEW_CACHE_GAP)
	min_size = NEW_CACHE_GAP;

      c->boundaries =
	(struct boundary *) xrealloc (c->boundaries,
				      ((min_size + c->cache_len)
				       * sizeof (*c->boundaries)));

      /* Some systems don't provide a version of the copy routine that
	 can be trusted to shift memory upward into an overlapping
	 region.  memmove isn't widely available.  */
      /* After this subtraction MIN_SIZE is the new gap length.
	 NOTE(review): the allocation above was sized with the
	 pre-subtraction MIN_SIZE, so the array ends up GAP_LEN slots
	 larger than strictly needed -- harmless slack, presumably
	 intentional headroom.  */
      min_size -= gap_len;
      for (i = c->cache_len - 1; i >= gap_start; i--)
	{
	  c->boundaries[i + min_size].pos = c->boundaries[i + gap_len].pos;
	  c->boundaries[i + min_size].value = c->boundaries[i + gap_len].value;
	}

      gap_len = min_size;
    }

  /* Need we move the gap left?  */
  while (pos < gap_start)
    {
      gap_start--;

      /* Copy one region from before to after the gap, and
	 convert its position to end-relative.  */
      c->boundaries[gap_start + gap_len].pos
	= c->boundaries[gap_start].pos + buffer_beg - buffer_end;
      c->boundaries[gap_start + gap_len].value
	= c->boundaries[gap_start].value;
    }

  /* Assign these back into the cache.  */
  c->gap_start = gap_start;
  c->gap_len = gap_len;
}
295 | ||
296 | ||
297 | /* Insert a new boundary in cache C; it will have cache index INDEX, | |
298 | and have the specified POS and VALUE. */ | |
299 | static void | |
300 | insert_cache_boundary (c, index, pos, value) | |
301 | struct region_cache *c; | |
302 | int index; | |
303 | int pos, value; | |
304 | { | |
305 | /* index must be a valid cache index. */ | |
306 | if (index < 0 || index > c->cache_len) | |
307 | abort (); | |
308 | ||
309 | /* We must never want to insert something before the dummy first | |
310 | boundary. */ | |
311 | if (index == 0) | |
312 | abort (); | |
313 | ||
314 | /* We must only be inserting things in order. */ | |
315 | if (! (BOUNDARY_POS (c, index-1) < pos | |
316 | && (index == c->cache_len | |
317 | || pos < BOUNDARY_POS (c, index)))) | |
318 | abort (); | |
319 | ||
320 | /* The value must be different from the ones around it. However, we | |
321 | temporarily create boundaries that establish the same value as | |
322 | the subsequent boundary, so we're not going to flag that case. */ | |
323 | if (BOUNDARY_VALUE (c, index-1) == value) | |
324 | abort (); | |
325 | ||
326 | move_cache_gap (c, index, 1); | |
327 | ||
328 | c->boundaries[index].pos = pos - c->buffer_beg; | |
329 | c->boundaries[index].value = value; | |
330 | c->gap_start++; | |
331 | c->gap_len--; | |
332 | c->cache_len++; | |
333 | } | |
334 | ||
335 | ||
336 | /* Delete the i'th entry from cache C if START <= i < END. */ | |
337 | ||
338 | static void | |
339 | delete_cache_boundaries (c, start, end) | |
340 | struct region_cache *c; | |
341 | int start, end; | |
342 | { | |
343 | int len = end - start; | |
344 | ||
345 | /* Gotta be in range. */ | |
346 | if (start < 0 | |
347 | || end > c->cache_len) | |
348 | abort (); | |
349 | ||
350 | /* Gotta be in order. */ | |
351 | if (start > end) | |
352 | abort (); | |
353 | ||
354 | /* Can't delete the dummy entry. */ | |
355 | if (start == 0 | |
356 | && end >= 1) | |
357 | abort (); | |
358 | ||
359 | /* Minimize gap motion. If we're deleting nothing, do nothing. */ | |
360 | if (len == 0) | |
361 | ; | |
362 | /* If the gap is before the region to delete, delete from the start | |
363 | forward. */ | |
364 | else if (c->gap_start <= start) | |
365 | { | |
366 | move_cache_gap (c, start, 0); | |
367 | c->gap_len += len; | |
368 | } | |
369 | /* If the gap is after the region to delete, delete from the end | |
370 | backward. */ | |
371 | else if (end <= c->gap_start) | |
372 | { | |
373 | move_cache_gap (c, end, 0); | |
374 | c->gap_start -= len; | |
375 | c->gap_len += len; | |
376 | } | |
377 | /* If the gap is in the region to delete, just expand it. */ | |
378 | else | |
379 | { | |
380 | c->gap_start = start; | |
381 | c->gap_len += len; | |
382 | } | |
383 | ||
384 | c->cache_len -= len; | |
385 | } | |
177c0ea7 | 386 | |
b9c5136f KH |
387 | |
388 | \f | |
389 | /* Set the value for a region. */ | |
390 | ||
/* Set the value in cache C for the region START..END to VALUE.
   START and END are absolute buffer positions and must satisfy
   buffer_beg <= START <= END <= buffer_end.  */
static void
set_cache_region (c, start, end, value)
     struct region_cache *c;
     int start, end;
     int value;
{
  if (start > end)
    abort ();
  if (start < c->buffer_beg
      || end > c->buffer_end)
    abort ();

  /* Eliminate this case; then we can assume that start and end-1 are
     both the locations of real characters in the buffer.  */
  if (start == end)
    return;

  {
    /* We need to make sure that there are no boundaries in the area
       between start to end; the whole area will have the same value,
       so those boundaries will not be necessary.

       Let start_ix be the cache index of the boundary governing the
       first character of start..end, and let end_ix be the cache
       index of the earliest boundary after the last character in
       start..end.  (This tortured terminology is intended to answer
       all the "< or <=?" sort of questions.)  */
    int start_ix = find_cache_boundary (c, start);
    int end_ix = find_cache_boundary (c, end - 1) + 1;

    /* We must remember the value established by the last boundary
       before end; if that boundary's domain stretches beyond end,
       we'll need to create a new boundary at end, and that boundary
       must have that remembered value.  */
    int value_at_end = BOUNDARY_VALUE (c, end_ix - 1);

    /* Delete all boundaries strictly within start..end; this means
       those whose indices are between start_ix (exclusive) and end_ix
       (exclusive).  */
    delete_cache_boundaries (c, start_ix + 1, end_ix);

    /* Make sure we have the right value established going in to
       start..end from the left, and no unnecessary boundaries.  */
    if (BOUNDARY_POS (c, start_ix) == start)
      {
	/* Is this boundary necessary?  If no, remove it; if yes, set
	   its value.  */
	if (start_ix > 0
	    && BOUNDARY_VALUE (c, start_ix - 1) == value)
	  {
	    delete_cache_boundaries (c, start_ix, start_ix + 1);
	    start_ix--;
	  }
	else
	  SET_BOUNDARY_VALUE (c, start_ix, value);
      }
    else
      {
	/* Do we need to add a new boundary here?  */
	if (BOUNDARY_VALUE (c, start_ix) != value)
	  {
	    insert_cache_boundary (c, start_ix + 1, start, value);
	    start_ix++;
	  }
      }

    /* This is equivalent to letting end_ix float (like a buffer
       marker does) with the insertions and deletions we may have
       done.  */
    end_ix = start_ix + 1;

    /* Make sure we have the correct value established as we leave
       start..end to the right.  */
    if (end == c->buffer_end)
      /* There is no text after start..end; nothing to do.  */
      ;
    else if (end_ix >= c->cache_len
	     || end < BOUNDARY_POS (c, end_ix))
      {
	/* There is no boundary at end, but we may need one.  */
	if (value_at_end != value)
	  insert_cache_boundary (c, end_ix, end, value_at_end);
      }
    else
      {
	/* There is a boundary at end; should it be there?  */
	if (value == BOUNDARY_VALUE (c, end_ix))
	  delete_cache_boundaries (c, end_ix, end_ix + 1);
      }
  }
}
483 | ||
484 | ||
485 | \f | |
486 | /* Interface: Invalidating the cache. Private: Re-validating the cache. */ | |
487 | ||
488 | /* Indicate that a section of BUF has changed, to invalidate CACHE. | |
489 | HEAD is the number of chars unchanged at the beginning of the buffer. | |
490 | TAIL is the number of chars unchanged at the end of the buffer. | |
491 | NOTE: this is *not* the same as the ending position of modified | |
492 | region. | |
493 | (This way of specifying regions makes more sense than absolute | |
494 | buffer positions in the presence of insertions and deletions; the | |
495 | args to pass are the same before and after such an operation.) */ | |
496 | void | |
497 | invalidate_region_cache (buf, c, head, tail) | |
498 | struct buffer *buf; | |
499 | struct region_cache *c; | |
500 | int head, tail; | |
501 | { | |
502 | /* Let chead = c->beg_unchanged, and | |
503 | ctail = c->end_unchanged. | |
504 | If z-tail < beg+chead by a large amount, or | |
505 | z-ctail < beg+head by a large amount, | |
506 | ||
507 | then cutting back chead and ctail to head and tail would lose a | |
508 | lot of information that we could preserve by revalidating the | |
509 | cache before processing this invalidation. Losing that | |
510 | information may be more costly than revalidating the cache now. | |
511 | So go ahead and call revalidate_region_cache if it seems that it | |
512 | might be worthwhile. */ | |
513 | if (((BUF_BEG (buf) + c->beg_unchanged) - (BUF_Z (buf) - tail) | |
514 | > PRESERVE_THRESHOLD) | |
515 | || ((BUF_BEG (buf) + head) - (BUF_Z (buf) - c->end_unchanged) | |
516 | > PRESERVE_THRESHOLD)) | |
517 | revalidate_region_cache (buf, c); | |
518 | ||
519 | ||
520 | if (head < c->beg_unchanged) | |
521 | c->beg_unchanged = head; | |
522 | if (tail < c->end_unchanged) | |
523 | c->end_unchanged = tail; | |
524 | ||
525 | /* We now know nothing about the region between the unchanged head | |
526 | and the unchanged tail (call it the "modified region"), not even | |
527 | its length. | |
528 | ||
529 | If the modified region has shrunk in size (deletions do this), | |
530 | then the cache may now contain boundaries originally located in | |
531 | text that doesn't exist any more. | |
532 | ||
533 | If the modified region has increased in size (insertions do | |
534 | this), then there may now be boundaries in the modified region | |
535 | whose positions are wrong. | |
536 | ||
537 | Even calling BOUNDARY_POS on boundaries still in the unchanged | |
538 | head or tail may well give incorrect answers now, since | |
539 | c->buffer_beg and c->buffer_end may well be wrong now. (Well, | |
540 | okay, c->buffer_beg never changes, so boundaries in the unchanged | |
541 | head will still be okay. But it's the principle of the thing.) | |
542 | ||
543 | So things are generally a mess. | |
544 | ||
545 | But we don't clean up this mess here; that would be expensive, | |
546 | and this function gets called every time any buffer modification | |
547 | occurs. Rather, we can clean up everything in one swell foop, | |
548 | accounting for all the modifications at once, by calling | |
549 | revalidate_region_cache before we try to consult the cache the | |
550 | next time. */ | |
551 | } | |
552 | ||
553 | ||
177c0ea7 | 554 | /* Clean out any cache entries applying to the modified region, and |
b9c5136f KH |
555 | make the positions of the remaining entries accurate again. |
556 | ||
557 | After calling this function, the mess described in the comment in | |
558 | invalidate_region_cache is cleaned up. | |
559 | ||
560 | This function operates by simply throwing away everything it knows | |
561 | about the modified region. It doesn't care exactly which | |
562 | insertions and deletions took place; it just tosses it all. | |
563 | ||
564 | For example, if you insert a single character at the beginning of | |
565 | the buffer, and a single character at the end of the buffer (for | |
566 | example), without calling this function in between the two | |
567 | insertions, then the entire cache will be freed of useful | |
568 | information. On the other hand, if you do manage to call this | |
569 | function in between the two insertions, then the modified regions | |
570 | will be small in both cases, no information will be tossed, and the | |
571 | cache will know that it doesn't have knowledge of the first and | |
572 | last characters any more. | |
573 | ||
574 | Calling this function may be expensive; it does binary searches in | |
575 | the cache, and causes cache gap motion. */ | |
576 | ||
577 | static void | |
578 | revalidate_region_cache (buf, c) | |
579 | struct buffer *buf; | |
580 | struct region_cache *c; | |
581 | { | |
582 | /* The boundaries now in the cache are expressed relative to the | |
583 | buffer_beg and buffer_end values stored in the cache. Now, | |
584 | buffer_beg and buffer_end may not be the same as BUF_BEG (buf) | |
585 | and BUF_Z (buf), so we have two different "bases" to deal with | |
586 | --- the cache's, and the buffer's. */ | |
587 | ||
588 | /* If the entire buffer is still valid, don't waste time. Yes, this | |
589 | should be a >, not a >=; think about what beg_unchanged and | |
590 | end_unchanged get set to when the only change has been an | |
591 | insertion. */ | |
592 | if (c->buffer_beg + c->beg_unchanged | |
593 | > c->buffer_end - c->end_unchanged) | |
594 | return; | |
595 | ||
596 | /* If all the text we knew about as of the last cache revalidation | |
597 | is still there, then all of the information in the cache is still | |
598 | valid. Because c->buffer_beg and c->buffer_end are out-of-date, | |
599 | the modified region appears from the cache's point of view to be | |
600 | a null region located someplace in the buffer. | |
601 | ||
602 | Now, invalidating that empty string will have no actual affect on | |
603 | the cache; instead, we need to update the cache's basis first | |
604 | (which will give the modified region the same size in the cache | |
605 | as it has in the buffer), and then invalidate the modified | |
606 | region. */ | |
177c0ea7 | 607 | if (c->buffer_beg + c->beg_unchanged |
b9c5136f KH |
608 | == c->buffer_end - c->end_unchanged) |
609 | { | |
610 | /* Move the gap so that all the boundaries in the unchanged head | |
611 | are expressed beg-relative, and all the boundaries in the | |
612 | unchanged tail are expressed end-relative. That done, we can | |
613 | plug in the new buffer beg and end, and all the positions | |
614 | will be accurate. | |
615 | ||
616 | The boundary which has jurisdiction over the modified region | |
617 | should be left before the gap. */ | |
618 | move_cache_gap (c, | |
619 | (find_cache_boundary (c, (c->buffer_beg | |
620 | + c->beg_unchanged)) | |
621 | + 1), | |
622 | 0); | |
623 | ||
624 | c->buffer_beg = BUF_BEG (buf); | |
625 | c->buffer_end = BUF_Z (buf); | |
626 | ||
627 | /* Now that the cache's basis has been changed, the modified | |
628 | region actually takes up some space in the cache, so we can | |
629 | invalidate it. */ | |
630 | set_cache_region (c, | |
631 | c->buffer_beg + c->beg_unchanged, | |
632 | c->buffer_end - c->end_unchanged, | |
633 | 0); | |
634 | } | |
635 | ||
636 | /* Otherwise, there is a non-empty region in the cache which | |
637 | corresponds to the modified region of the buffer. */ | |
638 | else | |
639 | { | |
640 | int modified_ix; | |
641 | ||
642 | /* These positions are correct, relative to both the cache basis | |
643 | and the buffer basis. */ | |
644 | set_cache_region (c, | |
645 | c->buffer_beg + c->beg_unchanged, | |
646 | c->buffer_end - c->end_unchanged, | |
647 | 0); | |
648 | ||
649 | /* Now the cache contains only boundaries that are in the | |
650 | unchanged head and tail; we've disposed of any boundaries | |
651 | whose positions we can't be sure of given the information | |
652 | we've saved. | |
653 | ||
654 | If we put the cache gap between the unchanged head and the | |
655 | unchanged tail, we can adjust all the boundary positions at | |
656 | once, simply by setting buffer_beg and buffer_end. | |
657 | ||
658 | The boundary which has jurisdiction over the modified region | |
659 | should be left before the gap. */ | |
660 | modified_ix = | |
661 | find_cache_boundary (c, (c->buffer_beg + c->beg_unchanged)) + 1; | |
662 | move_cache_gap (c, modified_ix, 0); | |
663 | ||
664 | c->buffer_beg = BUF_BEG (buf); | |
665 | c->buffer_end = BUF_Z (buf); | |
666 | ||
667 | /* Now, we may have shrunk the buffer when we changed the basis, | |
668 | and brought the boundaries we created for the start and end | |
669 | of the modified region together, giving them the same | |
670 | position. If that's the case, we should collapse them into | |
671 | one boundary. Or we may even delete them both, if the values | |
672 | before and after them are the same. */ | |
673 | if (modified_ix < c->cache_len | |
674 | && (BOUNDARY_POS (c, modified_ix - 1) | |
675 | == BOUNDARY_POS (c, modified_ix))) | |
676 | { | |
677 | int value_after = BOUNDARY_VALUE (c, modified_ix); | |
678 | ||
679 | /* Should we remove both of the boundaries? Yes, if the | |
680 | latter boundary is now establishing the same value that | |
681 | the former boundary's predecessor does. */ | |
682 | if (modified_ix - 1 > 0 | |
683 | && value_after == BOUNDARY_VALUE (c, modified_ix - 2)) | |
684 | delete_cache_boundaries (c, modified_ix - 1, modified_ix + 1); | |
685 | else | |
686 | { | |
687 | /* We do need a boundary here; collapse the two | |
688 | boundaries into one. */ | |
689 | SET_BOUNDARY_VALUE (c, modified_ix - 1, value_after); | |
690 | delete_cache_boundaries (c, modified_ix, modified_ix + 1); | |
691 | } | |
692 | } | |
693 | } | |
694 | ||
695 | /* Now the entire cache is valid. */ | |
696 | c->beg_unchanged | |
697 | = c->end_unchanged | |
698 | = c->buffer_end - c->buffer_beg; | |
699 | } | |
700 | ||
701 | \f | |
702 | /* Interface: Adding information to the cache. */ | |
703 | ||
/* Assert that the region of BUF between START and END (absolute
   buffer positions) is "known," for the purposes of CACHE (e.g. "has
   no newlines", in the case of the line cache).  */
void
know_region_cache (buf, c, start, end)
     struct buffer *buf;
     struct region_cache *c;
     int start, end;
{
  /* Fold any pending buffer modifications into the cache so its
     positions are accurate, then record the new knowledge.  */
  revalidate_region_cache (buf, c);
  set_cache_region (c, start, end, 1);
}
717 | ||
718 | \f | |
719 | /* Interface: using the cache. */ | |
720 | ||
721 | /* Return true if the text immediately after POS in BUF is known, for | |
177c0ea7 | 722 | the purposes of CACHE. If NEXT is non-zero, set *NEXT to the nearest |
b9c5136f KH |
723 | position after POS where the knownness changes. */ |
724 | int | |
725 | region_cache_forward (buf, c, pos, next) | |
726 | struct buffer *buf; | |
727 | struct region_cache *c; | |
728 | int pos; | |
729 | int *next; | |
730 | { | |
731 | revalidate_region_cache (buf, c); | |
732 | ||
733 | { | |
734 | int i = find_cache_boundary (c, pos); | |
735 | int i_value = BOUNDARY_VALUE (c, i); | |
736 | int j; | |
737 | ||
738 | /* Beyond the end of the buffer is unknown, by definition. */ | |
739 | if (pos >= BUF_Z (buf)) | |
740 | { | |
741 | if (next) *next = BUF_Z (buf); | |
742 | i_value = 0; | |
743 | } | |
744 | else if (next) | |
745 | { | |
746 | /* Scan forward from i to find the next differing position. */ | |
747 | for (j = i + 1; j < c->cache_len; j++) | |
748 | if (BOUNDARY_VALUE (c, j) != i_value) | |
749 | break; | |
750 | ||
751 | if (j < c->cache_len) | |
752 | *next = BOUNDARY_POS (c, j); | |
753 | else | |
754 | *next = BUF_Z (buf); | |
755 | } | |
756 | ||
757 | return i_value; | |
758 | } | |
759 | } | |
760 | ||
/* Return true if the text immediately before POS in BUF is known, for
   the purposes of CACHE.  If NEXT is non-zero, set *NEXT to the nearest
   position before POS where the knownness changes.  */
int region_cache_backward (buf, c, pos, next)
     struct buffer *buf;
     struct region_cache *c;
     int pos;
     int *next;
{
  int ix, val;

  revalidate_region_cache (buf, c);

  /* Before the beginning of the buffer is unknown, by
     definition.  */
  if (pos <= BUF_BEG (buf))
    {
      if (next) *next = BUF_BEG (buf);
      return 0;
    }

  /* POS - 1 is the character whose knownness we are reporting.  */
  ix = find_cache_boundary (c, pos - 1);
  val = BOUNDARY_VALUE (c, ix);

  if (next)
    {
      int scan;

      /* Walk backward to the first boundary establishing a different
	 value; the knownness changes at the boundary just after it.
	 If none does, the run extends back to the buffer start.  */
      for (scan = ix - 1; scan >= 0; scan--)
	if (BOUNDARY_VALUE (c, scan) != val)
	  break;

      *next = (scan >= 0
	       ? BOUNDARY_POS (c, scan + 1)
	       : BUF_BEG (buf));
    }

  return val;
}
801 | ||
802 | \f | |
803 | /* Debugging: pretty-print a cache to the standard error output. */ | |
804 | ||
805 | void | |
806 | pp_cache (c) | |
807 | struct region_cache *c; | |
808 | { | |
809 | int i; | |
810 | int beg_u = c->buffer_beg + c->beg_unchanged; | |
811 | int end_u = c->buffer_end - c->end_unchanged; | |
812 | ||
813 | fprintf (stderr, | |
814 | "basis: %d..%d modified: %d..%d\n", | |
815 | c->buffer_beg, c->buffer_end, | |
816 | beg_u, end_u); | |
817 | ||
818 | for (i = 0; i < c->cache_len; i++) | |
819 | { | |
820 | int pos = BOUNDARY_POS (c, i); | |
821 | ||
822 | putc (((pos < beg_u) ? 'v' | |
823 | : (pos == beg_u) ? '-' | |
824 | : ' '), | |
825 | stderr); | |
826 | putc (((pos > end_u) ? '^' | |
827 | : (pos == end_u) ? '-' | |
828 | : ' '), | |
829 | stderr); | |
830 | fprintf (stderr, "%d : %d\n", pos, BOUNDARY_VALUE (c, i)); | |
831 | } | |
832 | } | |
ab5796a9 MB |
833 | |
834 | /* arch-tag: 98c29f3f-2ca2-4e3a-92f0-f2249200a17d | |
835 | (do not change this comment) */ |