/* Profiler implementation.

Copyright (C) 2012 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */

20 | #include <config.h> | |
c2d7786e | 21 | #include "lisp.h" |
704d3f45 | 22 | #include "syssignal.h" |
d89460ed PE |
23 | #include "systime.h" |
24 | ||
25 | /* Return A + B, but return the maximum fixnum if the result would overflow. | |
26 | Assume A and B are nonnegative and in fixnum range. */ | |
27 | ||
28 | static EMACS_INT | |
29 | saturated_add (EMACS_INT a, EMACS_INT b) | |
30 | { | |
31 | return min (a + b, MOST_POSITIVE_FIXNUM); | |
32 | } | |
c2d7786e | 33 | |
/* Logs.  */

/* A profiler log is an ordinary Elisp hash-table used in a special,
   pre-allocated way (see make_log below).  */
typedef struct Lisp_Hash_Table log_t;

/* Return a fresh log hash-table with room for HEAP_SIZE entries, each
   keyed by a backtrace vector of length MAX_STACK_DEPTH.  */
static Lisp_Object
make_log (int heap_size, int max_stack_depth)
{
  /* We use a standard Elisp hash-table object, but we use it in
     a special way.  This is OK as long as the object is not exposed
     to Elisp, i.e. until it is returned by *-profiler-log, after which
     it can't be used any more.  */
  Lisp_Object log = make_hash_table (Qequal, make_number (heap_size),
                                     make_float (DEFAULT_REHASH_SIZE),
                                     make_float (DEFAULT_REHASH_THRESHOLD),
                                     Qnil, Qnil, Qnil);
  struct Lisp_Hash_Table *h = XHASH_TABLE (log);

  /* What is special about our hash-tables is that the keys are pre-filled
     with the vectors we'll put in them.  This way record_backtrace never
     needs to allocate while running inside a signal handler.  */
  int i = ASIZE (h->key_and_value) / 2;
  while (0 < i)
    set_hash_key_slot (h, --i,
                       Fmake_vector (make_number (max_stack_depth), Qnil));
  return log;
}
59 | ||
/* Evict the least used half of the hash_table.

   When the table is full, we have to evict someone.
   The easiest and most efficient is to evict the value we're about to add
   (i.e. once the table is full, stop sampling).

   We could also pick the element with the lowest count and evict it,
   but finding it is O(N) and for that amount of work we get very
   little in return: for the next sample, this latest sample will have
   count==1 and will hence be a prime candidate for eviction :-(

   So instead, we take O(N) time to eliminate more or less half of the
   entries (the half with the lowest counts).  So we get an amortized
   cost of O(1) and we get O(N) time for a new entry to grow larger
   than the other least counts before a new round of eviction.  */

/* Return an approximation of the median of the counter values stored
   in LOG at hash indices [START, START + SIZE), computed recursively
   as the median of the medians of three roughly equal thirds.  */
static EMACS_INT approximate_median (log_t *log,
                                     ptrdiff_t start, ptrdiff_t size)
{
  eassert (size > 0);
  if (size < 2)
    return XINT (HASH_VALUE (log, start));
  if (size < 3)
    /* Not an actual median, but better for our application than
       choosing either of the two numbers.  */
    return ((XINT (HASH_VALUE (log, start))
             + XINT (HASH_VALUE (log, start + 1)))
            / 2);
  else
    {
      ptrdiff_t newsize = size / 3;
      ptrdiff_t start2 = start + newsize;
      /* The last third absorbs the rounding remainder.  */
      EMACS_INT i1 = approximate_median (log, start, newsize);
      EMACS_INT i2 = approximate_median (log, start2, newsize);
      EMACS_INT i3 = approximate_median (log, start2 + newsize,
                                         size - 2 * newsize);
      /* Median of i1, i2, i3.  */
      return (i1 < i2
              ? (i2 < i3 ? i2 : (i1 < i3 ? i3 : i1))
              : (i1 < i3 ? i1 : (i2 < i3 ? i3 : i2)));
    }
}
101 | ||
/* Remove from LOG every entry whose count is at or below the
   approximate median, recycling each entry's key vector so the table
   keeps its pre-allocated-keys invariant (see make_log).  */
static void evict_lower_half (log_t *log)
{
  ptrdiff_t size = ASIZE (log->key_and_value) / 2;
  EMACS_INT median = approximate_median (log, 0, size);
  ptrdiff_t i;

  for (i = 0; i < size; i++)
    /* Evict not only values smaller but also values equal to the median,
       so as to make sure we evict something no matter what.  */
    if (XINT (HASH_VALUE (log, i)) <= median)
      {
        Lisp_Object key = HASH_KEY (log, i);
        { /* FIXME: we could make this more efficient.  */
          Lisp_Object tmp;
          XSET_HASH_TABLE (tmp, log); /* FIXME: Use make_lisp_ptr.  */
          Fremhash (key, tmp);
        }
        /* Fremhash should have pushed slot I onto the free list.  */
        eassert (EQ (log->next_free, make_number (i)));
        {
          /* Clear the evicted backtrace vector so it can be reused
             as the pre-filled key of this now-free slot.  */
          int j;
          eassert (VECTORP (key));
          for (j = 0; j < ASIZE (key); j++)
            ASET (key, j, Qnil);
        }
        set_hash_key_slot (log, i, key);
      }
}
129 | ||
/* Record the current backtrace in LOG.  COUNT is the weight of this
   current backtrace: milliseconds for CPU counts, and the allocation
   size for memory logs.

   May run from a signal handler, so it must not allocate memory
   (beyond the hash_put case noted below).  */

static void
record_backtrace (log_t *log, EMACS_INT count)
{
  struct backtrace *backlist = backtrace_list;
  Lisp_Object backtrace;
  ptrdiff_t index, i = 0;
  ptrdiff_t asize;

  /* A non-integer next_free means the table is full.  */
  if (!INTEGERP (log->next_free))
    /* FIXME: transfer the evicted counts to a special entry rather
       than dropping them on the floor.  */
    evict_lower_half (log);
  index = XINT (log->next_free);

  /* Get a "working memory" vector: the pre-allocated key of the next
     free slot (see make_log).  */
  backtrace = HASH_KEY (log, index);
  asize = ASIZE (backtrace);

  /* Copy the backtrace contents into working memory.  */
  for (; i < asize && backlist; i++, backlist = backlist->next)
    /* FIXME: For closures we should ignore the environment.  */
    ASET (backtrace, i, backlist->function);

  /* Make sure that unused space of working memory is filled with nil.  */
  for (; i < asize; i++)
    ASET (backtrace, i, Qnil);

  { /* We basically do a `gethash+puthash' here, except that we have to be
       careful to avoid memory allocation since we're in a signal
       handler, and we optimize the code to try and avoid computing the
       hash+lookup twice.  See fns.c:Fputhash for reference.  */
    EMACS_UINT hash;
    ptrdiff_t j = hash_lookup (log, backtrace, &hash);
    if (j >= 0)
      {
        /* Existing backtrace: saturating-add COUNT to its counter.  */
        EMACS_INT old_val = XINT (HASH_VALUE (log, j));
        EMACS_INT new_val = saturated_add (old_val, count);
        set_hash_value_slot (log, j, make_number (new_val));
      }
    else
      { /* BEWARE!  hash_put in general can allocate memory.
           But currently it only does that if log->next_free is nil.  */
        int j;
        eassert (!NILP (log->next_free));
        j = hash_put (log, backtrace, make_number (count), hash);
        /* Let's make sure we've put `backtrace' right where it
           already was to start with.  */
        eassert (index == j);

        /* FIXME: If the hash-table is almost full, we should set
           some global flag so that some Elisp code can offload its
           data elsewhere, so as to avoid the eviction code.
           There are 2 ways to do that, AFAICT:
           - Set a flag checked in QUIT, such that QUIT can then call
             Fprofiler_cpu_log and stash the full log for later use.
           - Set a flag check in post-gc-hook, so that Elisp code can call
             profiler-cpu-log.  That gives us more flexibility since that
             Elisp code can then do all kinds of fun stuff like write
             the log to disk.  Or turn it right away into a call tree.
           Of course, using Elisp is generally preferable, but it may
           take longer until we get a chance to run the Elisp code, so
           there's more risk that the table will get full before we
           get there.  */
      }
  }
}
c2d7786e | 200 | \f |
/* Sample profiler.  */

/* FIXME: Add support for the CPU profiler in W32.  */

#ifdef PROFILER_CPU_SUPPORT

/* The profiler timer and whether it was properly initialized, if
   POSIX timers are available.  */
#ifdef HAVE_TIMER_SETTIME
static timer_t profiler_timer;
static bool profiler_timer_ok;
#endif

/* Status of sampling profiler: which timer mechanism (if any) is
   currently driving SIGPROF delivery.  */
static enum profiler_cpu_running
  { NOT_RUNNING, TIMER_SETTIME_RUNNING, SETITIMER_RUNNING }
  profiler_cpu_running;

/* Hash-table log of CPU profiler.  Qnil until the profiler first
   starts, or after the log has been handed out to Elisp.  */
static Lisp_Object cpu_log;

/* Separate counter for the time spent in the GC, because the log
   hash-table cannot be touched while GC runs (see
   handle_profiler_signal).  */
static EMACS_INT cpu_gc_count;

/* The current sample interval in milliseconds.  */
static EMACS_INT current_sample_interval;
c2d7786e | 227 | |
/* Signal handler for sample profiler.  Credits the current sample
   interval either to the GC counter or to the backtrace log.  */

static void
handle_profiler_signal (int signal)
{
  if (backtrace_list && EQ (backtrace_list->function, Qautomatic_gc))
    /* Special case the time-count inside GC because the hash-table
       code is not prepared to be used while the GC is running.
       More specifically it uses ASIZE at many places where it does
       not expect the ARRAY_MARK_FLAG to be set.  We could try and
       harden the hash-table code, but it doesn't seem worth the
       effort.  */
    cpu_gc_count = saturated_add (cpu_gc_count, current_sample_interval);
  else
    {
      eassert (HASH_TABLE_P (cpu_log));
      record_backtrace (XHASH_TABLE (cpu_log), current_sample_interval);
    }
}
247 | ||
/* SIGPROF entry point: route the signal through the common
   process-signal delivery mechanism to handle_profiler_signal.  */
static void
deliver_profiler_signal (int signal)
{
  deliver_process_signal (signal, handle_profiler_signal);
}
253 | ||
/* Arm a periodic SIGPROF timer firing every SAMPLE_INTERVAL
   milliseconds.  Prefer a POSIX per-thread/process CPU-time timer when
   available, falling back to setitimer (ITIMER_PROF).  Return which
   mechanism ended up running, or NOT_RUNNING if SAMPLE_INTERVAL is out
   of range.  */
static enum profiler_cpu_running
setup_cpu_timer (Lisp_Object sample_interval)
{
  struct sigaction action;
  struct itimerval timer;
  struct timespec interval;

  /* Reject intervals whose second count would overflow time_t (the
     upper bound is expressed in milliseconds, hence the * 1000 + 999).  */
  if (! RANGED_INTEGERP (1, sample_interval,
                         (TYPE_MAXIMUM (time_t) < EMACS_INT_MAX / 1000
                          ? (EMACS_INT) TYPE_MAXIMUM (time_t) * 1000 + 999
                          : EMACS_INT_MAX)))
    return NOT_RUNNING;

  current_sample_interval = XINT (sample_interval);
  interval = make_emacs_time (current_sample_interval / 1000,
                              current_sample_interval % 1000 * 1000000);
  emacs_sigaction_init (&action, deliver_profiler_signal);
  sigaction (SIGPROF, &action, 0);

#ifdef HAVE_TIMER_SETTIME
  if (! profiler_timer_ok)
    {
      /* System clocks to try, in decreasing order of desirability.  */
      static clockid_t const system_clock[] = {
#ifdef CLOCK_THREAD_CPUTIME_ID
        CLOCK_THREAD_CPUTIME_ID,
#endif
#ifdef CLOCK_PROCESS_CPUTIME_ID
        CLOCK_PROCESS_CPUTIME_ID,
#endif
#ifdef CLOCK_MONOTONIC
        CLOCK_MONOTONIC,
#endif
        CLOCK_REALTIME
      };
      int i;
      struct sigevent sigev;
      sigev.sigev_value.sival_ptr = &profiler_timer;
      sigev.sigev_signo = SIGPROF;
      sigev.sigev_notify = SIGEV_SIGNAL;

      /* Take the first clock for which a timer can be created.  */
      for (i = 0; i < sizeof system_clock / sizeof *system_clock; i++)
        if (timer_create (system_clock[i], &sigev, &profiler_timer) == 0)
          {
            profiler_timer_ok = 1;
            break;
          }
    }

  if (profiler_timer_ok)
    {
      struct itimerspec ispec;
      ispec.it_value = ispec.it_interval = interval;
      timer_settime (profiler_timer, 0, &ispec, 0);
      return TIMER_SETTIME_RUNNING;
    }
#endif

  /* POSIX timers unavailable or timer_create failed: fall back to the
     traditional profiling interval timer.  */
  timer.it_value = timer.it_interval = make_timeval (interval);
  setitimer (ITIMER_PROF, &timer, 0);
  return SETITIMER_RUNNING;
}
316 | ||
DEFUN ("profiler-cpu-start", Fprofiler_cpu_start, Sprofiler_cpu_start,
       1, 1, 0,
       doc: /* Start or restart the cpu profiler.
It takes call-stack samples each SAMPLE-INTERVAL milliseconds.
See also `profiler-log-size' and `profiler-max-stack-depth'.  */)
  (Lisp_Object sample_interval)
{
  if (profiler_cpu_running)
    error ("Sample profiler is already running");

  /* Reuse an existing log if one survives from a previous run;
     otherwise allocate a fresh one and reset the GC counter.  */
  if (NILP (cpu_log))
    {
      cpu_gc_count = 0;
      cpu_log = make_log (profiler_log_size,
                          profiler_max_stack_depth);
    }

  profiler_cpu_running = setup_cpu_timer (sample_interval);
  if (! profiler_cpu_running)
    error ("Invalid sample interval");

  return Qt;
}
340 | ||
DEFUN ("profiler-cpu-stop", Fprofiler_cpu_stop, Sprofiler_cpu_stop,
       0, 0, 0,
       doc: /* Stop the cpu profiler.  The profiler log is not affected.
Return non-nil if the profiler was running.  */)
  (void)
{
  /* Disarm whichever timer mechanism setup_cpu_timer armed.  */
  switch (profiler_cpu_running)
    {
    case NOT_RUNNING:
      return Qnil;

#ifdef HAVE_TIMER_SETTIME
    case TIMER_SETTIME_RUNNING:
      {
        /* An all-zero itimerspec disarms the POSIX timer.  */
        struct itimerspec disable;
        memset (&disable, 0, sizeof disable);
        timer_settime (profiler_timer, 0, &disable, 0);
      }
      break;
#endif

    case SETITIMER_RUNNING:
      {
        /* An all-zero itimerval disarms the interval timer.  */
        struct itimerval disable;
        memset (&disable, 0, sizeof disable);
        setitimer (ITIMER_PROF, &disable, 0);
      }
      break;
    }

  /* Ignore any SIGPROF still in flight after the timer is disarmed.  */
  signal (SIGPROF, SIG_IGN);
  profiler_cpu_running = NOT_RUNNING;
  return Qt;
}
375 | ||
6521894d SM |
376 | DEFUN ("profiler-cpu-running-p", |
377 | Fprofiler_cpu_running_p, Sprofiler_cpu_running_p, | |
c2d7786e | 378 | 0, 0, 0, |
6521894d | 379 | doc: /* Return non-nil iff cpu profiler is running. */) |
c2d7786e TM |
380 | (void) |
381 | { | |
6521894d | 382 | return profiler_cpu_running ? Qt : Qnil; |
c2d7786e TM |
383 | } |
384 | ||
6521894d | 385 | DEFUN ("profiler-cpu-log", Fprofiler_cpu_log, Sprofiler_cpu_log, |
c2d7786e | 386 | 0, 0, 0, |
6521894d SM |
387 | doc: /* Return the current cpu profiler log. |
388 | The log is a hash-table mapping backtraces to counters which represent | |
389 | the amount of time spent at those points. Every backtrace is a vector | |
390 | of functions, where the last few elements may be nil. | |
391 | Before returning, a new log is allocated for future samples. */) | |
c2d7786e TM |
392 | (void) |
393 | { | |
3d80c99f | 394 | Lisp_Object result = cpu_log; |
d89460ed | 395 | /* Here we're making the log visible to Elisp, so it's not safe any |
3d80c99f SM |
396 | more for our use afterwards since we can't rely on its special |
397 | pre-allocated keys anymore. So we have to allocate a new one. */ | |
6521894d SM |
398 | cpu_log = (profiler_cpu_running |
399 | ? make_log (profiler_log_size, profiler_max_stack_depth) | |
3d80c99f SM |
400 | : Qnil); |
401 | Fputhash (Fmake_vector (make_number (1), Qautomatic_gc), | |
402 | make_number (cpu_gc_count), | |
403 | result); | |
404 | cpu_gc_count = 0; | |
c2d7786e TM |
405 | return result; |
406 | } | |
d89460ed | 407 | #endif /* PROFILER_CPU_SUPPORT */ |
c2d7786e | 408 | \f |
/* Memory profiler.  */

/* True if memory profiler is running.  */
bool profiler_memory_running;

/* Hash-table log of the memory profiler; Qnil when no log has been
   allocated yet (see make_log).  */
static Lisp_Object memory_log;
c2d7786e | 415 | |
6521894d | 416 | DEFUN ("profiler-memory-start", Fprofiler_memory_start, Sprofiler_memory_start, |
c2d7786e | 417 | 0, 0, 0, |
6521894d SM |
418 | doc: /* Start/restart the memory profiler. |
419 | The memory profiler will take samples of the call-stack whenever a new | |
420 | allocation takes place. Note that most small allocations only trigger | |
421 | the profiler occasionally. | |
422 | See also `profiler-log-size' and `profiler-max-stack-depth'. */) | |
c2d7786e TM |
423 | (void) |
424 | { | |
6521894d | 425 | if (profiler_memory_running) |
c2d7786e TM |
426 | error ("Memory profiler is already running"); |
427 | ||
3d80c99f | 428 | if (NILP (memory_log)) |
6521894d | 429 | memory_log = make_log (profiler_log_size, |
c2d7786e TM |
430 | profiler_max_stack_depth); |
431 | ||
234148bf | 432 | profiler_memory_running = true; |
c2d7786e TM |
433 | |
434 | return Qt; | |
435 | } | |
436 | ||
6521894d SM |
437 | DEFUN ("profiler-memory-stop", |
438 | Fprofiler_memory_stop, Sprofiler_memory_stop, | |
c2d7786e | 439 | 0, 0, 0, |
234148bf SM |
440 | doc: /* Stop the memory profiler. The profiler log is not affected. |
441 | Return non-nil if the profiler was running. */) | |
c2d7786e TM |
442 | (void) |
443 | { | |
6521894d | 444 | if (!profiler_memory_running) |
234148bf SM |
445 | return Qnil; |
446 | profiler_memory_running = false; | |
c2d7786e TM |
447 | return Qt; |
448 | } | |
449 | ||
6521894d SM |
450 | DEFUN ("profiler-memory-running-p", |
451 | Fprofiler_memory_running_p, Sprofiler_memory_running_p, | |
c2d7786e | 452 | 0, 0, 0, |
6521894d | 453 | doc: /* Return non-nil if memory profiler is running. */) |
c2d7786e TM |
454 | (void) |
455 | { | |
6521894d | 456 | return profiler_memory_running ? Qt : Qnil; |
c2d7786e TM |
457 | } |
458 | ||
6521894d SM |
459 | DEFUN ("profiler-memory-log", |
460 | Fprofiler_memory_log, Sprofiler_memory_log, | |
c2d7786e | 461 | 0, 0, 0, |
6521894d SM |
462 | doc: /* Return the current memory profiler log. |
463 | The log is a hash-table mapping backtraces to counters which represent | |
464 | the amount of memory allocated at those points. Every backtrace is a vector | |
465 | of functions, where the last few elements may be nil. | |
466 | Before returning, a new log is allocated for future samples. */) | |
c2d7786e TM |
467 | (void) |
468 | { | |
3d80c99f SM |
469 | Lisp_Object result = memory_log; |
470 | /* Here we're making the log visible to Elisp , so it's not safe any | |
471 | more for our use afterwards since we can't rely on its special | |
472 | pre-allocated keys anymore. So we have to allocate a new one. */ | |
6521894d SM |
473 | memory_log = (profiler_memory_running |
474 | ? make_log (profiler_log_size, profiler_max_stack_depth) | |
3d80c99f | 475 | : Qnil); |
c2d7786e TM |
476 | return result; |
477 | } | |
478 | ||
479 | \f | |
0efc778b | 480 | /* Signals and probes. */ |
c2d7786e | 481 | |
0efc778b | 482 | /* Record that the current backtrace allocated SIZE bytes. */ |
c2d7786e TM |
483 | void |
484 | malloc_probe (size_t size) | |
485 | { | |
ad942b63 | 486 | eassert (HASH_TABLE_P (memory_log)); |
d89460ed | 487 | record_backtrace (XHASH_TABLE (memory_log), min (size, MOST_POSITIVE_FIXNUM)); |
c2d7786e TM |
488 | } |
489 | ||
/* Initialize the profiler module: define its Lisp variables,
   GC-protect the log objects, and register the profiler subrs.  */
void
syms_of_profiler (void)
{
  DEFVAR_INT ("profiler-max-stack-depth", profiler_max_stack_depth,
              doc: /* Number of elements from the call-stack recorded in the log.  */);
  profiler_max_stack_depth = 16;
  DEFVAR_INT ("profiler-log-size", profiler_log_size,
              doc: /* Number of distinct call-stacks that can be recorded in a profiler log.
If the log gets full, some of the least-seen call-stacks will be evicted
to make room for new entries.  */);
  profiler_log_size = 10000;

#ifdef PROFILER_CPU_SUPPORT
  profiler_cpu_running = NOT_RUNNING;
  cpu_log = Qnil;
  staticpro (&cpu_log);  /* Protect the log from GC.  */
  defsubr (&Sprofiler_cpu_start);
  defsubr (&Sprofiler_cpu_stop);
  defsubr (&Sprofiler_cpu_running_p);
  defsubr (&Sprofiler_cpu_log);
#endif
  profiler_memory_running = false;
  memory_log = Qnil;
  staticpro (&memory_log);  /* Protect the log from GC.  */
  defsubr (&Sprofiler_memory_start);
  defsubr (&Sprofiler_memory_stop);
  defsubr (&Sprofiler_memory_running_p);
  defsubr (&Sprofiler_memory_log);
}