Commit | Line | Data |
---|---|---|
b9c5136f | 1 | /* Caching facts about regions of the buffer, for optimization. |
95df8112 | 2 | |
acaf905b | 3 | Copyright (C) 1985-1989, 1993, 1995, 2001-2012 |
95df8112 | 4 | Free Software Foundation, Inc. |
b9c5136f KH |
5 | |
6 | This file is part of GNU Emacs. | |
7 | ||
9ec0b715 | 8 | GNU Emacs is free software: you can redistribute it and/or modify |
b9c5136f | 9 | it under the terms of the GNU General Public License as published by |
9ec0b715 GM |
10 | the Free Software Foundation, either version 3 of the License, or |
11 | (at your option) any later version. | |
b9c5136f KH |
12 | |
13 | GNU Emacs is distributed in the hope that it will be useful, | |
14 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | GNU General Public License for more details. | |
17 | ||
18 | You should have received a copy of the GNU General Public License | |
9ec0b715 | 19 | along with GNU Emacs. If not, see <http://www.gnu.org/licenses/>. */ |
b9c5136f KH |
20 | |
21 | ||
22 | #include <config.h> | |
5890e9f7 | 23 | #include <stdio.h> |
d7306fe6 | 24 | #include <setjmp.h> |
5890e9f7 | 25 | |
b9c5136f | 26 | #include "lisp.h" |
e5560ff7 | 27 | #include "character.h" |
b9c5136f KH |
28 | #include "buffer.h" |
29 | #include "region-cache.h" | |
30 | ||
b9c5136f KH |
31 | \f |
32 | /* Data structures. */ | |
33 | ||
34 | /* The region cache. | |
35 | ||
36 | We want something that maps character positions in a buffer onto | |
37 | values. The representation should deal well with long runs of | |
38 | characters with the same value. | |
39 | ||
40 | The tricky part: the representation should be very cheap to | |
41 | maintain in the presence of many insertions and deletions. If the | |
42 | overhead of maintaining the cache is too high, the speedups it | |
43 | offers will be worthless. | |
44 | ||
45 | ||
46 | We represent the region cache as a sorted array of struct | |
47 | boundary's, each of which contains a buffer position and a value; | |
48 | the value applies to all the characters after the buffer position, | |
49 | until the position of the next boundary, or the end of the buffer. | |
50 | ||
51 | The cache always has a boundary whose position is BUF_BEG, so | |
52 | there's always a value associated with every character in the | |
53 | buffer. Since the cache is sorted, this is always the first | |
54 | element of the cache. | |
55 | ||
56 | To facilitate the insertion and deletion of boundaries in the | |
57 | cache, the cache has a gap, just like Emacs's text buffers do. | |
58 | ||
59 | To help boundary positions float along with insertions and | |
60 | deletions, all boundary positions before the cache gap are stored | |
61 | relative to BUF_BEG (buf) (thus they're >= 0), and all boundary | |
62 | positions after the gap are stored relative to BUF_Z (buf) (thus | |
63 | they're <= 0). Look at BOUNDARY_POS to see this in action. See | |
64 | revalidate_region_cache to see how this helps. */ | |
65 | ||
/* One entry in the region cache: VALUE applies to every character
   after position POS, up to the position of the next boundary (or the
   end of the buffer).  */
struct boundary {
  /* Position of this boundary.  Stored relative to BUF_BEG (thus
     >= 0) when the boundary lies before the cache gap, and relative
     to BUF_Z (thus <= 0) when it lies after the gap; see
     BOUNDARY_POS.  */
  ptrdiff_t pos;

  /* The value in effect for the text following this boundary.  */
  int value;
};
70 | ||
struct region_cache {
  /* A sorted array of locations where the known-ness of the buffer
     changes.  */
  struct boundary *boundaries;

  /* boundaries[gap_start ... gap_start + gap_len - 1] is the gap.  */
  ptrdiff_t gap_start, gap_len;

  /* The number of elements allocated to boundaries, not including the
     gap.  */
  ptrdiff_t cache_len;

  /* The areas that haven't changed since the last time we cleaned out
     invalid entries from the cache.  These overlap when the buffer is
     entirely unchanged.  */
  ptrdiff_t beg_unchanged, end_unchanged;

  /* The first and last positions in the buffer.  Because boundaries
     store their positions relative to the start (BEG) and end (Z) of
     the buffer, knowing these positions allows us to accurately
     interpret positions without having to pass the buffer structure
     or its endpoints around all the time.

     Yes, buffer_beg is always 1.  It's there for symmetry with
     buffer_end and the BEG and BUF_BEG macros.  */
  ptrdiff_t buffer_beg, buffer_end;
};
98 | ||
/* Return the position of boundary i in cache c.  Boundaries before
   the gap store beg-relative positions; boundaries at or after the
   gap store end-relative positions and live gap_len slots further
   along in the array.  Beware: these macros evaluate their arguments
   more than once.  */
#define BOUNDARY_POS(c, i) \
  ((i) < (c)->gap_start \
   ? (c)->buffer_beg + (c)->boundaries[(i)].pos \
   : (c)->buffer_end + (c)->boundaries[(c)->gap_len + (i)].pos)

/* Return the value for text after boundary i in cache c.  */
#define BOUNDARY_VALUE(c, i) \
  ((i) < (c)->gap_start \
   ? (c)->boundaries[(i)].value \
   : (c)->boundaries[(c)->gap_len + (i)].value)

/* Set the value for text after boundary i in cache c to v.  */
#define SET_BOUNDARY_VALUE(c, i, v) \
  ((i) < (c)->gap_start \
   ? ((c)->boundaries[(i)].value = (v))\
   : ((c)->boundaries[(c)->gap_len + (i)].value = (v)))
116 | ||
117 | ||
/* How many elements to add to the gap when we resize the buffer.  */
#define NEW_CACHE_GAP (40)

/* See invalidate_region_cache; if an invalidation would throw away
   information about this many characters, call
   revalidate_region_cache before doing the new invalidation, to
   preserve that information, instead of throwing it away.  */
#define PRESERVE_THRESHOLD (500)

static void revalidate_region_cache (struct buffer *buf, struct region_cache *c);
b9c5136f KH |
128 | |
129 | \f | |
130 | /* Interface: Allocating, initializing, and disposing of region caches. */ | |
131 | ||
132 | struct region_cache * | |
971de7fb | 133 | new_region_cache (void) |
b9c5136f | 134 | { |
38182d90 | 135 | struct region_cache *c = xmalloc (sizeof *c); |
b9c5136f KH |
136 | |
137 | c->gap_start = 0; | |
138 | c->gap_len = NEW_CACHE_GAP; | |
139 | c->cache_len = 0; | |
23f86fce DA |
140 | c->boundaries = xmalloc ((c->gap_len + c->cache_len) |
141 | * sizeof (*c->boundaries)); | |
b9c5136f KH |
142 | |
143 | c->beg_unchanged = 0; | |
144 | c->end_unchanged = 0; | |
b0ead4a8 SM |
145 | c->buffer_beg = BEG; |
146 | c->buffer_end = BEG; | |
b9c5136f KH |
147 | |
148 | /* Insert the boundary for the buffer start. */ | |
149 | c->cache_len++; | |
150 | c->gap_len--; | |
151 | c->gap_start++; | |
152 | c->boundaries[0].pos = 0; /* from buffer_beg */ | |
153 | c->boundaries[0].value = 0; | |
154 | ||
155 | return c; | |
156 | } | |
157 | ||
158 | void | |
971de7fb | 159 | free_region_cache (struct region_cache *c) |
b9c5136f KH |
160 | { |
161 | xfree (c->boundaries); | |
162 | xfree (c); | |
163 | } | |
164 | ||
165 | \f | |
166 | /* Finding positions in the cache. */ | |
167 | ||
168 | /* Return the index of the last boundary in cache C at or before POS. | |
169 | In other words, return the boundary that specifies the value for | |
170 | the region POS..(POS + 1). | |
171 | ||
172 | This operation should be logarithmic in the number of cache | |
173 | entries. It would be nice if it took advantage of locality of | |
174 | reference, too, by searching entries near the last entry found. */ | |
0065d054 PE |
175 | static ptrdiff_t |
176 | find_cache_boundary (struct region_cache *c, ptrdiff_t pos) | |
b9c5136f | 177 | { |
0065d054 | 178 | ptrdiff_t low = 0, high = c->cache_len; |
b9c5136f KH |
179 | |
180 | while (low + 1 < high) | |
181 | { | |
182 | /* mid is always a valid index, because low < high and ">> 1" | |
183 | rounds down. */ | |
0065d054 PE |
184 | ptrdiff_t mid = (low >> 1) + (high >> 1) + (low & high & 1); |
185 | ptrdiff_t boundary = BOUNDARY_POS (c, mid); | |
b9c5136f KH |
186 | |
187 | if (pos < boundary) | |
188 | high = mid; | |
189 | else | |
190 | low = mid; | |
191 | } | |
192 | ||
193 | /* Some testing. */ | |
194 | if (BOUNDARY_POS (c, low) > pos | |
195 | || (low + 1 < c->cache_len | |
196 | && BOUNDARY_POS (c, low + 1) <= pos)) | |
197 | abort (); | |
198 | ||
199 | return low; | |
200 | } | |
201 | ||
202 | ||
203 | \f | |
204 | /* Moving the cache gap around, inserting, and deleting. */ | |
205 | ||
206 | ||
207 | /* Move the gap of cache C to index POS, and make sure it has space | |
208 | for at least MIN_SIZE boundaries. */ | |
209 | static void | |
0065d054 | 210 | move_cache_gap (struct region_cache *c, ptrdiff_t pos, ptrdiff_t min_size) |
b9c5136f KH |
211 | { |
212 | /* Copy these out of the cache and into registers. */ | |
0065d054 PE |
213 | ptrdiff_t gap_start = c->gap_start; |
214 | ptrdiff_t gap_len = c->gap_len; | |
215 | ptrdiff_t buffer_beg = c->buffer_beg; | |
216 | ptrdiff_t buffer_end = c->buffer_end; | |
b9c5136f KH |
217 | |
218 | if (pos < 0 | |
219 | || pos > c->cache_len) | |
220 | abort (); | |
221 | ||
222 | /* We mustn't ever try to put the gap before the dummy start | |
223 | boundary. That must always be start-relative. */ | |
224 | if (pos == 0) | |
225 | abort (); | |
226 | ||
227 | /* Need we move the gap right? */ | |
228 | while (gap_start < pos) | |
229 | { | |
230 | /* Copy one boundary from after to before the gap, and | |
231 | convert its position to start-relative. */ | |
232 | c->boundaries[gap_start].pos | |
233 | = (buffer_end | |
234 | + c->boundaries[gap_start + gap_len].pos | |
235 | - buffer_beg); | |
236 | c->boundaries[gap_start].value | |
237 | = c->boundaries[gap_start + gap_len].value; | |
238 | gap_start++; | |
239 | } | |
240 | ||
241 | /* To enlarge the gap, we need to re-allocate the boundary array, and | |
242 | then shift the area after the gap to the new end. Since the cost | |
243 | is proportional to the amount of stuff after the gap, we do the | |
244 | enlargement here, after a right shift but before a left shift, | |
245 | when the portion after the gap is smallest. */ | |
246 | if (gap_len < min_size) | |
247 | { | |
0065d054 | 248 | ptrdiff_t i; |
b9c5136f KH |
249 | |
250 | c->boundaries = | |
0065d054 PE |
251 | xpalloc (c->boundaries, &c->cache_len, min_size, -1, |
252 | sizeof *c->boundaries); | |
b9c5136f KH |
253 | |
254 | /* Some systems don't provide a version of the copy routine that | |
255 | can be trusted to shift memory upward into an overlapping | |
256 | region. memmove isn't widely available. */ | |
257 | min_size -= gap_len; | |
258 | for (i = c->cache_len - 1; i >= gap_start; i--) | |
259 | { | |
260 | c->boundaries[i + min_size].pos = c->boundaries[i + gap_len].pos; | |
261 | c->boundaries[i + min_size].value = c->boundaries[i + gap_len].value; | |
262 | } | |
263 | ||
264 | gap_len = min_size; | |
265 | } | |
266 | ||
267 | /* Need we move the gap left? */ | |
268 | while (pos < gap_start) | |
269 | { | |
270 | gap_start--; | |
271 | ||
272 | /* Copy one region from before to after the gap, and | |
273 | convert its position to end-relative. */ | |
274 | c->boundaries[gap_start + gap_len].pos | |
275 | = c->boundaries[gap_start].pos + buffer_beg - buffer_end; | |
276 | c->boundaries[gap_start + gap_len].value | |
277 | = c->boundaries[gap_start].value; | |
278 | } | |
279 | ||
280 | /* Assign these back into the cache. */ | |
281 | c->gap_start = gap_start; | |
282 | c->gap_len = gap_len; | |
283 | } | |
284 | ||
285 | ||
/* Insert a new boundary in cache C; it will have cache index I,
   and have the specified POS and VALUE.  The caller must keep the
   cache sorted; the sanity checks below abort on misuse.  */
static void
insert_cache_boundary (struct region_cache *c, ptrdiff_t i, ptrdiff_t pos,
                       int value)
{
  /* i must be a valid cache index.  */
  if (i < 0 || i > c->cache_len)
    abort ();

  /* We must never want to insert something before the dummy first
     boundary.  */
  if (i == 0)
    abort ();

  /* We must only be inserting things in order.  */
  if (! (BOUNDARY_POS (c, i - 1) < pos
         && (i == c->cache_len
             || pos < BOUNDARY_POS (c, i))))
    abort ();

  /* The value must be different from the ones around it.  However, we
     temporarily create boundaries that establish the same value as
     the subsequent boundary, so we're not going to flag that case.  */
  if (BOUNDARY_VALUE (c, i - 1) == value)
    abort ();

  /* Put the gap at index I with room for at least one entry, then
     fill the first gap slot with the new boundary.  */
  move_cache_gap (c, i, 1);

  /* Slot I is before the gap after the move, so its position is
     stored beg-relative.  */
  c->boundaries[i].pos = pos - c->buffer_beg;
  c->boundaries[i].value = value;
  /* The new entry annexes one slot from the gap.  */
  c->gap_start++;
  c->gap_len--;
  c->cache_len++;
}
321 | ||
322 | ||
/* Delete the i'th entry from cache C if START <= i < END.
   Deletion is done by absorbing the doomed entries into the gap,
   moving the gap as little as possible.  */

static void
delete_cache_boundaries (struct region_cache *c,
                         ptrdiff_t start, ptrdiff_t end)
{
  ptrdiff_t len = end - start;

  /* Gotta be in range.  */
  if (start < 0
      || end > c->cache_len)
    abort ();

  /* Gotta be in order.  */
  if (start > end)
    abort ();

  /* Can't delete the dummy entry.  */
  if (start == 0
      && end >= 1)
    abort ();

  /* Minimize gap motion.  If we're deleting nothing, do nothing.  */
  if (len == 0)
    ;
  /* If the gap is before the region to delete, delete from the start
     forward.  */
  else if (c->gap_start <= start)
    {
      move_cache_gap (c, start, 0);
      c->gap_len += len;
    }
  /* If the gap is after the region to delete, delete from the end
     backward.  */
  else if (end <= c->gap_start)
    {
      move_cache_gap (c, end, 0);
      c->gap_start -= len;
      c->gap_len += len;
    }
  /* If the gap is in the region to delete, just expand it.  */
  else
    {
      c->gap_start = start;
      c->gap_len += len;
    }

  c->cache_len -= len;
}
177c0ea7 | 372 | |
b9c5136f KH |
373 | |
374 | \f | |
375 | /* Set the value for a region. */ | |
376 | ||
/* Set the value in cache C for the region START..END to VALUE.
   START and END are absolute buffer positions; boundaries inside the
   region are removed, and boundaries are added/adjusted at its edges
   so the cache stays sorted and minimal.  */
static void
set_cache_region (struct region_cache *c,
                  ptrdiff_t start, ptrdiff_t end, int value)
{
  if (start > end)
    abort ();
  if (start < c->buffer_beg
      || end > c->buffer_end)
    abort ();

  /* Eliminate this case; then we can assume that start and end-1 are
     both the locations of real characters in the buffer.  */
  if (start == end)
    return;

  {
    /* We need to make sure that there are no boundaries in the area
       between start to end; the whole area will have the same value,
       so those boundaries will not be necessary.

       Let start_ix be the cache index of the boundary governing the
       first character of start..end, and let end_ix be the cache
       index of the earliest boundary after the last character in
       start..end.  (This tortured terminology is intended to answer
       all the "< or <=?" sort of questions.)  */
    ptrdiff_t start_ix = find_cache_boundary (c, start);
    ptrdiff_t end_ix   = find_cache_boundary (c, end - 1) + 1;

    /* We must remember the value established by the last boundary
       before end; if that boundary's domain stretches beyond end,
       we'll need to create a new boundary at end, and that boundary
       must have that remembered value.  */
    int value_at_end = BOUNDARY_VALUE (c, end_ix - 1);

    /* Delete all boundaries strictly within start..end; this means
       those whose indices are between start_ix (exclusive) and end_ix
       (exclusive).  */
    delete_cache_boundaries (c, start_ix + 1, end_ix);

    /* Make sure we have the right value established going in to
       start..end from the left, and no unnecessary boundaries.  */
    if (BOUNDARY_POS (c, start_ix) == start)
      {
        /* Is this boundary necessary?  If no, remove it; if yes, set
           its value.  */
        if (start_ix > 0
            && BOUNDARY_VALUE (c, start_ix - 1) == value)
          {
            delete_cache_boundaries (c, start_ix, start_ix + 1);
            start_ix--;
          }
        else
          SET_BOUNDARY_VALUE (c, start_ix, value);
      }
    else
      {
        /* Do we need to add a new boundary here?  */
        if (BOUNDARY_VALUE (c, start_ix) != value)
          {
            insert_cache_boundary (c, start_ix + 1, start, value);
            start_ix++;
          }
      }

    /* This is equivalent to letting end_ix float (like a buffer
       marker does) with the insertions and deletions we may have
       done.  */
    end_ix = start_ix + 1;

    /* Make sure we have the correct value established as we leave
       start..end to the right.  */
    if (end == c->buffer_end)
      /* There is no text after start..end; nothing to do.  */
      ;
    else if (end_ix >= c->cache_len
             || end < BOUNDARY_POS (c, end_ix))
      {
        /* There is no boundary at end, but we may need one.  */
        if (value_at_end != value)
          insert_cache_boundary (c, end_ix, end, value_at_end);
      }
    else
      {
        /* There is a boundary at end; should it be there?  */
        if (value == BOUNDARY_VALUE (c, end_ix))
          delete_cache_boundaries (c, end_ix, end_ix + 1);
      }
  }
}
467 | ||
468 | ||
469 | \f | |
470 | /* Interface: Invalidating the cache. Private: Re-validating the cache. */ | |
471 | ||
/* Indicate that a section of BUF has changed, to invalidate CACHE.
   HEAD is the number of chars unchanged at the beginning of the buffer.
   TAIL is the number of chars unchanged at the end of the buffer.
      NOTE: this is *not* the same as the ending position of modified
      region.
   (This way of specifying regions makes more sense than absolute
   buffer positions in the presence of insertions and deletions; the
   args to pass are the same before and after such an operation.)  */
void
invalidate_region_cache (struct buffer *buf, struct region_cache *c,
                         ptrdiff_t head, ptrdiff_t tail)
{
  /* Let chead = c->beg_unchanged, and
         ctail = c->end_unchanged.
     If z-tail < beg+chead by a large amount, or
        z-ctail < beg+head by a large amount,

     then cutting back chead and ctail to head and tail would lose a
     lot of information that we could preserve by revalidating the
     cache before processing this invalidation.  Losing that
     information may be more costly than revalidating the cache now.
     So go ahead and call revalidate_region_cache if it seems that it
     might be worthwhile.  */
  if (((BUF_BEG (buf) + c->beg_unchanged) - (BUF_Z (buf) - tail)
       > PRESERVE_THRESHOLD)
      || ((BUF_BEG (buf) + head) - (BUF_Z (buf) - c->end_unchanged)
          > PRESERVE_THRESHOLD))
    revalidate_region_cache (buf, c);

  /* The unchanged head/tail can only shrink: keep the smaller of the
     recorded and the newly reported extents.  */
  if (head < c->beg_unchanged)
    c->beg_unchanged = head;
  if (tail < c->end_unchanged)
    c->end_unchanged = tail;

  /* We now know nothing about the region between the unchanged head
     and the unchanged tail (call it the "modified region"), not even
     its length.

     If the modified region has shrunk in size (deletions do this),
     then the cache may now contain boundaries originally located in
     text that doesn't exist any more.

     If the modified region has increased in size (insertions do
     this), then there may now be boundaries in the modified region
     whose positions are wrong.

     Even calling BOUNDARY_POS on boundaries still in the unchanged
     head or tail may well give incorrect answers now, since
     c->buffer_beg and c->buffer_end may well be wrong now.  (Well,
     okay, c->buffer_beg never changes, so boundaries in the unchanged
     head will still be okay.  But it's the principle of the thing.)

     So things are generally a mess.

     But we don't clean up this mess here; that would be expensive,
     and this function gets called every time any buffer modification
     occurs.  Rather, we can clean up everything in one swell foop,
     accounting for all the modifications at once, by calling
     revalidate_region_cache before we try to consult the cache the
     next time.  */
}
534 | ||
535 | ||
/* Clean out any cache entries applying to the modified region, and
   make the positions of the remaining entries accurate again.

   After calling this function, the mess described in the comment in
   invalidate_region_cache is cleaned up.

   This function operates by simply throwing away everything it knows
   about the modified region.  It doesn't care exactly which
   insertions and deletions took place; it just tosses it all.

   For example, if you insert a single character at the beginning of
   the buffer, and a single character at the end of the buffer (for
   example), without calling this function in between the two
   insertions, then the entire cache will be freed of useful
   information.  On the other hand, if you do manage to call this
   function in between the two insertions, then the modified regions
   will be small in both cases, no information will be tossed, and the
   cache will know that it doesn't have knowledge of the first and
   last characters any more.

   Calling this function may be expensive; it does binary searches in
   the cache, and causes cache gap motion.  */

static void
revalidate_region_cache (struct buffer *buf, struct region_cache *c)
{
  /* The boundaries now in the cache are expressed relative to the
     buffer_beg and buffer_end values stored in the cache.  Now,
     buffer_beg and buffer_end may not be the same as BUF_BEG (buf)
     and BUF_Z (buf), so we have two different "bases" to deal with
     --- the cache's, and the buffer's.  */

  /* If the entire buffer is still valid, don't waste time.  Yes, this
     should be a >, not a >=; think about what beg_unchanged and
     end_unchanged get set to when the only change has been an
     insertion.  */
  if (c->buffer_beg + c->beg_unchanged
      > c->buffer_end - c->end_unchanged)
    return;

  /* If all the text we knew about as of the last cache revalidation
     is still there, then all of the information in the cache is still
     valid.  Because c->buffer_beg and c->buffer_end are out-of-date,
     the modified region appears from the cache's point of view to be
     a null region located someplace in the buffer.

     Now, invalidating that empty string will have no actual effect on
     the cache; instead, we need to update the cache's basis first
     (which will give the modified region the same size in the cache
     as it has in the buffer), and then invalidate the modified
     region.  */
  if (c->buffer_beg + c->beg_unchanged
      == c->buffer_end - c->end_unchanged)
    {
      /* Move the gap so that all the boundaries in the unchanged head
         are expressed beg-relative, and all the boundaries in the
         unchanged tail are expressed end-relative.  That done, we can
         plug in the new buffer beg and end, and all the positions
         will be accurate.

         The boundary which has jurisdiction over the modified region
         should be left before the gap.  */
      move_cache_gap (c,
                      (find_cache_boundary (c, (c->buffer_beg
                                                + c->beg_unchanged))
                       + 1),
                      0);

      c->buffer_beg = BUF_BEG (buf);
      c->buffer_end = BUF_Z (buf);

      /* Now that the cache's basis has been changed, the modified
         region actually takes up some space in the cache, so we can
         invalidate it.  */
      set_cache_region (c,
                        c->buffer_beg + c->beg_unchanged,
                        c->buffer_end - c->end_unchanged,
                        0);
    }

  /* Otherwise, there is a non-empty region in the cache which
     corresponds to the modified region of the buffer.  */
  else
    {
      ptrdiff_t modified_ix;

      /* These positions are correct, relative to both the cache basis
         and the buffer basis.  */
      set_cache_region (c,
                        c->buffer_beg + c->beg_unchanged,
                        c->buffer_end - c->end_unchanged,
                        0);

      /* Now the cache contains only boundaries that are in the
         unchanged head and tail; we've disposed of any boundaries
         whose positions we can't be sure of given the information
         we've saved.

         If we put the cache gap between the unchanged head and the
         unchanged tail, we can adjust all the boundary positions at
         once, simply by setting buffer_beg and buffer_end.

         The boundary which has jurisdiction over the modified region
         should be left before the gap.  */
      modified_ix =
        find_cache_boundary (c, (c->buffer_beg + c->beg_unchanged)) + 1;
      move_cache_gap (c, modified_ix, 0);

      c->buffer_beg = BUF_BEG (buf);
      c->buffer_end = BUF_Z (buf);

      /* Now, we may have shrunk the buffer when we changed the basis,
         and brought the boundaries we created for the start and end
         of the modified region together, giving them the same
         position.  If that's the case, we should collapse them into
         one boundary.  Or we may even delete them both, if the values
         before and after them are the same.  */
      if (modified_ix < c->cache_len
          && (BOUNDARY_POS (c, modified_ix - 1)
              == BOUNDARY_POS (c, modified_ix)))
        {
          int value_after = BOUNDARY_VALUE (c, modified_ix);

          /* Should we remove both of the boundaries?  Yes, if the
             latter boundary is now establishing the same value that
             the former boundary's predecessor does.  */
          if (modified_ix - 1 > 0
              && value_after == BOUNDARY_VALUE (c, modified_ix - 2))
            delete_cache_boundaries (c, modified_ix - 1, modified_ix + 1);
          else
            {
              /* We do need a boundary here; collapse the two
                 boundaries into one.  */
              SET_BOUNDARY_VALUE (c, modified_ix - 1, value_after);
              delete_cache_boundaries (c, modified_ix, modified_ix + 1);
            }
        }
    }

  /* Now the entire cache is valid.  */
  c->beg_unchanged
    = c->end_unchanged
    = c->buffer_end - c->buffer_beg;
}
680 | ||
681 | \f | |
682 | /* Interface: Adding information to the cache. */ | |
683 | ||
/* Assert that the region of BUF between START and END (absolute
   buffer positions) is "known," for the purposes of CACHE (e.g. "has
   no newlines", in the case of the line cache).  */
void
know_region_cache (struct buffer *buf, struct region_cache *c,
                   ptrdiff_t start, ptrdiff_t end)
{
  /* Bring the cache's basis up to date with the buffer before
     recording the new information.  */
  revalidate_region_cache (buf, c);
  set_cache_region (c, start, end, 1);
}
695 | ||
696 | \f | |
697 | /* Interface: using the cache. */ | |
698 | ||
699 | /* Return true if the text immediately after POS in BUF is known, for | |
177c0ea7 | 700 | the purposes of CACHE. If NEXT is non-zero, set *NEXT to the nearest |
333f9019 | 701 | position after POS where the knowledge changes. */ |
b9c5136f | 702 | int |
c098fdb8 | 703 | region_cache_forward (struct buffer *buf, struct region_cache *c, |
0065d054 | 704 | ptrdiff_t pos, ptrdiff_t *next) |
b9c5136f KH |
705 | { |
706 | revalidate_region_cache (buf, c); | |
707 | ||
708 | { | |
0065d054 | 709 | ptrdiff_t i = find_cache_boundary (c, pos); |
b9c5136f | 710 | int i_value = BOUNDARY_VALUE (c, i); |
0065d054 | 711 | ptrdiff_t j; |
b9c5136f KH |
712 | |
713 | /* Beyond the end of the buffer is unknown, by definition. */ | |
714 | if (pos >= BUF_Z (buf)) | |
715 | { | |
716 | if (next) *next = BUF_Z (buf); | |
717 | i_value = 0; | |
718 | } | |
719 | else if (next) | |
720 | { | |
721 | /* Scan forward from i to find the next differing position. */ | |
722 | for (j = i + 1; j < c->cache_len; j++) | |
723 | if (BOUNDARY_VALUE (c, j) != i_value) | |
724 | break; | |
725 | ||
726 | if (j < c->cache_len) | |
727 | *next = BOUNDARY_POS (c, j); | |
728 | else | |
729 | *next = BUF_Z (buf); | |
730 | } | |
731 | ||
732 | return i_value; | |
733 | } | |
734 | } | |
735 | ||
/* Return true if the text immediately before POS in BUF is known, for
   the purposes of CACHE.  If NEXT is non-zero, set *NEXT to the nearest
   position before POS where the knowledge changes.  */
int
region_cache_backward (struct buffer *buf, struct region_cache *c,
                       ptrdiff_t pos, ptrdiff_t *next)
{
  revalidate_region_cache (buf, c);

  /* Before the beginning of the buffer is unknown, by
     definition.  */
  if (pos <= BUF_BEG (buf))
    {
      if (next)
        *next = BUF_BEG (buf);
      return 0;
    }

  {
    ptrdiff_t i = find_cache_boundary (c, pos - 1);
    int known = BOUNDARY_VALUE (c, i);

    if (next)
      {
        /* Walk back from I past every boundary that establishes the
           same value; the knowledge changes just after the nearest
           boundary with a different value.  */
        ptrdiff_t scan = i;
        while (scan > 0 && BOUNDARY_VALUE (c, scan - 1) == known)
          scan--;

        *next = scan > 0 ? BOUNDARY_POS (c, scan) : BUF_BEG (buf);
      }

    return known;
  }
}
773 | ||
774 | \f | |
775 | /* Debugging: pretty-print a cache to the standard error output. */ | |
776 | ||
e3b27b31 | 777 | void pp_cache (struct region_cache *) EXTERNALLY_VISIBLE; |
b9c5136f | 778 | void |
971de7fb | 779 | pp_cache (struct region_cache *c) |
b9c5136f | 780 | { |
0065d054 PE |
781 | ptrdiff_t i; |
782 | ptrdiff_t beg_u = c->buffer_beg + c->beg_unchanged; | |
783 | ptrdiff_t end_u = c->buffer_end - c->end_unchanged; | |
b9c5136f KH |
784 | |
785 | fprintf (stderr, | |
0065d054 | 786 | "basis: %"pD"d..%"pD"d modified: %"pD"d..%"pD"d\n", |
c2982e87 PE |
787 | c->buffer_beg, c->buffer_end, |
788 | beg_u, end_u); | |
b9c5136f KH |
789 | |
790 | for (i = 0; i < c->cache_len; i++) | |
791 | { | |
0065d054 | 792 | ptrdiff_t pos = BOUNDARY_POS (c, i); |
b9c5136f KH |
793 | |
794 | putc (((pos < beg_u) ? 'v' | |
795 | : (pos == beg_u) ? '-' | |
796 | : ' '), | |
797 | stderr); | |
798 | putc (((pos > end_u) ? '^' | |
799 | : (pos == end_u) ? '-' | |
800 | : ' '), | |
801 | stderr); | |
0065d054 | 802 | fprintf (stderr, "%"pD"d : %d\n", pos, BOUNDARY_VALUE (c, i)); |
b9c5136f KH |
803 | } |
804 | } |