/* Profiler implementation.

Copyright (C) 2012-2014 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <http://www.gnu.org/licenses/>.  */

#include <config.h>
#include "lisp.h"
#include "syssignal.h"
#include "systime.h"

/* Return A + B, but return the maximum fixnum if the result would overflow.
   Assume A and B are nonnegative and in fixnum range.  */

static EMACS_INT
saturated_add (EMACS_INT a, EMACS_INT b)
{
  return min (a + b, MOST_POSITIVE_FIXNUM);
}

/* Logs.  */

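/* A log is a standard Elisp hash table used in a special way: it maps
   backtrace vectors to sample counts, and all its keys are pre-allocated
   so that record_backtrace can run inside a signal handler without
   allocating memory.  See make_log and record_backtrace below.  */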
typedef struct Lisp_Hash_Table log_t;

static Lisp_Object Qautomatic_gc;
static Lisp_Object Qprofiler_backtrace_equal;
static struct hash_table_test hashtest_profiler;

static Lisp_Object
make_log (int heap_size, int max_stack_depth)
{
  /* We use a standard Elisp hash-table object, but we use it in
     a special way.  This is OK as long as the object is not exposed
     to Elisp, i.e. until it is returned by *-profiler-log, after which
     it can't be used any more.  */
  Lisp_Object log = make_hash_table (hashtest_profiler,
                                     make_number (heap_size),
                                     make_float (DEFAULT_REHASH_SIZE),
                                     make_float (DEFAULT_REHASH_THRESHOLD),
                                     Qnil);
  struct Lisp_Hash_Table *h = XHASH_TABLE (log);

  /* What is special about our hash-tables is that the keys are pre-filled
     with the vectors we'll put in them.  */
  int i = ASIZE (h->key_and_value) / 2;
  while (i > 0)
    set_hash_key_slot (h, --i,
                       Fmake_vector (make_number (max_stack_depth), Qnil));
  return log;
}

/* Evict the least used half of the hash_table.

   When the table is full, we have to evict someone.
   The easiest and most efficient is to evict the value we're about to add
   (i.e. once the table is full, stop sampling).

   We could also pick the element with the lowest count and evict it,
   but finding it is O(N) and for that amount of work we get very
   little in return: for the next sample, this latest sample will have
   count==1 and will hence be a prime candidate for eviction :-(

   So instead, we take O(N) time to eliminate more or less half of the
   entries (the half with the lowest counts).  So we get an amortized
   cost of O(1) and we get O(N) time for a new entry to grow larger
   than the other least counts before a new round of eviction.  */

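/* Return an approximation of the median of the hash values in LOG over
   the range [START, START+SIZE): split the range into thirds and take
   the median of the three sub-medians.  */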
static EMACS_INT
approximate_median (log_t *log, ptrdiff_t start, ptrdiff_t size)
{
  eassert (size > 0);
  if (size < 2)
    return XINT (HASH_VALUE (log, start));
  if (size < 3)
    /* Not an actual median, but better for our application than
       choosing either of the two numbers.  */
    return ((XINT (HASH_VALUE (log, start))
             + XINT (HASH_VALUE (log, start + 1)))
            / 2);
  else
    {
      ptrdiff_t newsize = size / 3;
      ptrdiff_t start2 = start + newsize;
      EMACS_INT i1 = approximate_median (log, start, newsize);
      EMACS_INT i2 = approximate_median (log, start2, newsize);
      EMACS_INT i3 = approximate_median (log, start2 + newsize,
                                         size - 2 * newsize);
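      /* Return the median of i1, i2 and i3.  */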
      return (i1 < i2
              ? (i2 < i3 ? i2 : (i1 < i3 ? i3 : i1))
              : (i1 < i3 ? i1 : (i2 < i3 ? i3 : i2)));
    }
}

static void
evict_lower_half (log_t *log)
{
  ptrdiff_t size = ASIZE (log->key_and_value) / 2;
  EMACS_INT median = approximate_median (log, 0, size);
  ptrdiff_t i;

  for (i = 0; i < size; i++)
    /* Evict not only values smaller but also values equal to the median,
       so as to make sure we evict something no matter what.  */
    if (XINT (HASH_VALUE (log, i)) <= median)
      {
        Lisp_Object key = HASH_KEY (log, i);
        { /* FIXME: we could make this more efficient.  */
          Lisp_Object tmp;
          XSET_HASH_TABLE (tmp, log); /* FIXME: Use make_lisp_ptr.  */
          Fremhash (key, tmp);
        }
        eassert (EQ (log->next_free, make_number (i)));
        {
          int j;
          eassert (VECTORP (key));
          for (j = 0; j < ASIZE (key); j++)
            ASET (key, j, Qnil);
        }
        set_hash_key_slot (log, i, key);
      }
}

/* Record the current backtrace in LOG.  COUNT is the weight of this
   current backtrace: interrupt counts for CPU, and the allocation
   size for memory.  */

static void
record_backtrace (log_t *log, EMACS_INT count)
{
  Lisp_Object backtrace;
  ptrdiff_t index;

  if (!INTEGERP (log->next_free))
    /* FIXME: transfer the evicted counts to a special entry rather
       than dropping them on the floor.  */
    evict_lower_half (log);
  index = XINT (log->next_free);

  /* Get a "working memory" vector.  */
  backtrace = HASH_KEY (log, index);
  get_backtrace (backtrace);

  { /* We basically do a `gethash+puthash' here, except that we have to be
       careful to avoid memory allocation since we're in a signal
       handler, and we optimize the code to try and avoid computing the
       hash+lookup twice.  See fns.c:Fputhash for reference.  */
    EMACS_UINT hash;
    ptrdiff_t j = hash_lookup (log, backtrace, &hash);
    if (j >= 0)
      {
        EMACS_INT old_val = XINT (HASH_VALUE (log, j));
        EMACS_INT new_val = saturated_add (old_val, count);
        set_hash_value_slot (log, j, make_number (new_val));
      }
    else
      { /* BEWARE!  hash_put in general can allocate memory.
           But currently it only does that if log->next_free is nil.  */
        int j;
        eassert (!NILP (log->next_free));
        j = hash_put (log, backtrace, make_number (count), hash);
        /* Let's make sure we've put `backtrace' right where it
           already was to start with.  */
        eassert (index == j);

        /* FIXME: If the hash-table is almost full, we should set
           some global flag so that some Elisp code can offload its
           data elsewhere, so as to avoid the eviction code.
           There are 2 ways to do that, AFAICT:
           - Set a flag checked in QUIT, such that QUIT can then call
             Fprofiler_cpu_log and stash the full log for later use.
           - Set a flag checked in post-gc-hook, so that Elisp code can call
             profiler-cpu-log.  That gives us more flexibility since that
             Elisp code can then do all kinds of fun stuff like write
             the log to disk.  Or turn it right away into a call tree.
             Of course, using Elisp is generally preferable, but it may
             take longer until we get a chance to run the Elisp code, so
             there's more risk that the table will get full before we
             get there.  */
      }
  }
}
\f
/* Sampling profiler.  */

#ifdef PROFILER_CPU_SUPPORT

/* The profiler timer and whether it was properly initialized, if
   POSIX timers are available.  */
#ifdef HAVE_ITIMERSPEC
static timer_t profiler_timer;
static bool profiler_timer_ok;
#endif

/* Status of sampling profiler.  */
static enum profiler_cpu_running
  { NOT_RUNNING, TIMER_SETTIME_RUNNING, SETITIMER_RUNNING }
  profiler_cpu_running;

/* Hash-table log of CPU profiler.  */
static Lisp_Object cpu_log;

/* Separate counter for the time spent in the GC.  */
static EMACS_INT cpu_gc_count;

/* The current sampling interval in nanoseconds.  */
static EMACS_INT current_sampling_interval;

/* Signal handler for sampling profiler.  */

static void
handle_profiler_signal (int signal)
{
  if (EQ (backtrace_top_function (), Qautomatic_gc))
    /* Special case the time-count inside GC because the hash-table
       code is not prepared to be used while the GC is running.
       More specifically it uses ASIZE at many places where it does
       not expect the ARRAY_MARK_FLAG to be set.  We could try and
       harden the hash-table code, but it doesn't seem worth the
       effort.  */
    cpu_gc_count = saturated_add (cpu_gc_count, 1);
  else
    {
      EMACS_INT count = 1;
#ifdef HAVE_ITIMERSPEC
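      /* A single SIGPROF may stand for several timer expirations if
         delivery lagged; timer_getoverrun reports how many extra
         expirations occurred, so count them all.  */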
      if (profiler_timer_ok)
        {
          int overruns = timer_getoverrun (profiler_timer);
          eassert (overruns >= 0);
          count += overruns;
        }
#endif
      eassert (HASH_TABLE_P (cpu_log));
      record_backtrace (XHASH_TABLE (cpu_log), count);
    }
}

static void
deliver_profiler_signal (int signal)
{
  deliver_process_signal (signal, handle_profiler_signal);
}

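/* Arm a periodic timer that delivers SIGPROF every SAMPLING_INTERVAL
   nanoseconds, preferring a POSIX timer on a CPU-time clock when
   available and falling back to setitimer.  Return which mechanism was
   started, or NOT_RUNNING on failure.  */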
static enum profiler_cpu_running
setup_cpu_timer (Lisp_Object sampling_interval)
{
  struct sigaction action;
  struct itimerval timer;
  struct timespec interval;
  int billion = 1000000000;

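  /* Reject intervals that are out of range: the value must be at least
     1 ns, and its seconds part must fit in time_t.  */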
  if (! RANGED_INTEGERP (1, sampling_interval,
                         (TYPE_MAXIMUM (time_t) < EMACS_INT_MAX / billion
                          ? ((EMACS_INT) TYPE_MAXIMUM (time_t) * billion
                             + (billion - 1))
                          : EMACS_INT_MAX)))
    return NOT_RUNNING;

  current_sampling_interval = XINT (sampling_interval);
  interval = make_timespec (current_sampling_interval / billion,
                            current_sampling_interval % billion);
  emacs_sigaction_init (&action, deliver_profiler_signal);
  sigaction (SIGPROF, &action, 0);

#ifdef HAVE_ITIMERSPEC
  if (! profiler_timer_ok)
    {
      /* System clocks to try, in decreasing order of desirability.  */
      static clockid_t const system_clock[] = {
#ifdef CLOCK_THREAD_CPUTIME_ID
        CLOCK_THREAD_CPUTIME_ID,
#endif
#ifdef CLOCK_PROCESS_CPUTIME_ID
        CLOCK_PROCESS_CPUTIME_ID,
#endif
#ifdef CLOCK_MONOTONIC
        CLOCK_MONOTONIC,
#endif
        CLOCK_REALTIME
      };
      int i;
      struct sigevent sigev;
      sigev.sigev_value.sival_ptr = &profiler_timer;
      sigev.sigev_signo = SIGPROF;
      sigev.sigev_notify = SIGEV_SIGNAL;

      for (i = 0; i < ARRAYELTS (system_clock); i++)
        if (timer_create (system_clock[i], &sigev, &profiler_timer) == 0)
          {
            profiler_timer_ok = 1;
            break;
          }
    }

  if (profiler_timer_ok)
    {
      struct itimerspec ispec;
      ispec.it_value = ispec.it_interval = interval;
      if (timer_settime (profiler_timer, 0, &ispec, 0) == 0)
        return TIMER_SETTIME_RUNNING;
    }
#endif

#ifdef HAVE_SETITIMER
  timer.it_value = timer.it_interval = make_timeval (interval);
  if (setitimer (ITIMER_PROF, &timer, 0) == 0)
    return SETITIMER_RUNNING;
#endif

  return NOT_RUNNING;
}

DEFUN ("profiler-cpu-start", Fprofiler_cpu_start, Sprofiler_cpu_start,
       1, 1, 0,
       doc: /* Start or restart the CPU profiler.
It takes call-stack samples every SAMPLING-INTERVAL nanoseconds, approximately.
See also `profiler-log-size' and `profiler-max-stack-depth'.  */)
  (Lisp_Object sampling_interval)
{
  if (profiler_cpu_running)
    error ("CPU profiler is already running");

  if (NILP (cpu_log))
    {
      cpu_gc_count = 0;
      cpu_log = make_log (profiler_log_size,
                          profiler_max_stack_depth);
    }

  profiler_cpu_running = setup_cpu_timer (sampling_interval);
  if (! profiler_cpu_running)
    error ("Invalid sampling interval");

  return Qt;
}
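
/* For instance, from Elisp, (profiler-cpu-start 1000000) samples the
   call stack roughly once per millisecond.  */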

DEFUN ("profiler-cpu-stop", Fprofiler_cpu_stop, Sprofiler_cpu_stop,
       0, 0, 0,
       doc: /* Stop the CPU profiler.  The profiler log is not affected.
Return non-nil if the profiler was running.  */)
  (void)
{
  switch (profiler_cpu_running)
    {
    case NOT_RUNNING:
      return Qnil;

#ifdef HAVE_ITIMERSPEC
    case TIMER_SETTIME_RUNNING:
      {
        struct itimerspec disable;
        memset (&disable, 0, sizeof disable);
        timer_settime (profiler_timer, 0, &disable, 0);
      }
      break;
#endif

#ifdef HAVE_SETITIMER
    case SETITIMER_RUNNING:
      {
        struct itimerval disable;
        memset (&disable, 0, sizeof disable);
        setitimer (ITIMER_PROF, &disable, 0);
      }
      break;
#endif
    }

  signal (SIGPROF, SIG_IGN);
  profiler_cpu_running = NOT_RUNNING;
  return Qt;
}

DEFUN ("profiler-cpu-running-p",
       Fprofiler_cpu_running_p, Sprofiler_cpu_running_p,
       0, 0, 0,
       doc: /* Return non-nil if CPU profiler is running.  */)
  (void)
{
  return profiler_cpu_running ? Qt : Qnil;
}

DEFUN ("profiler-cpu-log", Fprofiler_cpu_log, Sprofiler_cpu_log,
       0, 0, 0,
       doc: /* Return the current CPU profiler log.
The log is a hash-table mapping backtraces to counters which represent
the amount of time spent at those points.  Every backtrace is a vector
of functions, where the last few elements may be nil.
Before returning, a new log is allocated for future samples.  */)
  (void)
{
  Lisp_Object result = cpu_log;
  /* Here we're making the log visible to Elisp, so it's not safe any
     more for our use afterwards since we can't rely on its special
     pre-allocated keys anymore.  So we have to allocate a new one.  */
  cpu_log = (profiler_cpu_running
             ? make_log (profiler_log_size, profiler_max_stack_depth)
             : Qnil);
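  /* Report the time spent inside GC under a one-element pseudo-backtrace
     containing `Automatic GC', since those samples bypass
     record_backtrace (see handle_profiler_signal).  */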
  Fputhash (Fmake_vector (make_number (1), Qautomatic_gc),
            make_number (cpu_gc_count),
            result);
  cpu_gc_count = 0;
  return result;
}
#endif /* PROFILER_CPU_SUPPORT */
\f
/* Memory profiler.  */

/* True if memory profiler is running.  */
bool profiler_memory_running;

static Lisp_Object memory_log;

DEFUN ("profiler-memory-start", Fprofiler_memory_start, Sprofiler_memory_start,
       0, 0, 0,
       doc: /* Start/restart the memory profiler.
The memory profiler will take samples of the call-stack whenever a new
allocation takes place.  Note that most small allocations only trigger
the profiler occasionally.
See also `profiler-log-size' and `profiler-max-stack-depth'.  */)
  (void)
{
  if (profiler_memory_running)
    error ("Memory profiler is already running");

  if (NILP (memory_log))
    memory_log = make_log (profiler_log_size,
                           profiler_max_stack_depth);

  profiler_memory_running = true;

  return Qt;
}

DEFUN ("profiler-memory-stop",
       Fprofiler_memory_stop, Sprofiler_memory_stop,
       0, 0, 0,
       doc: /* Stop the memory profiler.  The profiler log is not affected.
Return non-nil if the profiler was running.  */)
  (void)
{
  if (!profiler_memory_running)
    return Qnil;
  profiler_memory_running = false;
  return Qt;
}

DEFUN ("profiler-memory-running-p",
       Fprofiler_memory_running_p, Sprofiler_memory_running_p,
       0, 0, 0,
       doc: /* Return non-nil if memory profiler is running.  */)
  (void)
{
  return profiler_memory_running ? Qt : Qnil;
}

DEFUN ("profiler-memory-log",
       Fprofiler_memory_log, Sprofiler_memory_log,
       0, 0, 0,
       doc: /* Return the current memory profiler log.
The log is a hash-table mapping backtraces to counters which represent
the amount of memory allocated at those points.  Every backtrace is a vector
of functions, where the last few elements may be nil.
Before returning, a new log is allocated for future samples.  */)
  (void)
{
  Lisp_Object result = memory_log;
  /* Here we're making the log visible to Elisp, so it's not safe any
     more for our use afterwards since we can't rely on its special
     pre-allocated keys anymore.  So we have to allocate a new one.  */
  memory_log = (profiler_memory_running
                ? make_log (profiler_log_size, profiler_max_stack_depth)
                : Qnil);
  return result;
}

\f
/* Signals and probes.  */

/* Record that the current backtrace allocated SIZE bytes.  */
void
malloc_probe (size_t size)
{
  eassert (HASH_TABLE_P (memory_log));
  record_backtrace (XHASH_TABLE (memory_log), min (size, MOST_POSITIVE_FIXNUM));
}

DEFUN ("function-equal", Ffunction_equal, Sfunction_equal, 2, 2, 0,
       doc: /* Return non-nil if F1 and F2 come from the same source.
Used to determine if different closures are just different instances of
the same lambda expression, or are really unrelated functions.  */)
  (Lisp_Object f1, Lisp_Object f2)
{
  bool res;
  if (EQ (f1, f2))
    res = true;
  else if (COMPILEDP (f1) && COMPILEDP (f2))
    res = EQ (AREF (f1, COMPILED_BYTECODE), AREF (f2, COMPILED_BYTECODE));
  else if (CONSP (f1) && CONSP (f2) && CONSP (XCDR (f1)) && CONSP (XCDR (f2))
           && EQ (Qclosure, XCAR (f1))
           && EQ (Qclosure, XCAR (f2)))
    res = EQ (XCDR (XCDR (f1)), XCDR (XCDR (f2)));
  else
    res = false;
  return res ? Qt : Qnil;
}

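/* Equality test used for profiler logs: two backtraces are equal if
   they are the same object, or if their frames are pairwise
   `function-equal'.  */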
static bool
cmpfn_profiler (struct hash_table_test *t,
                Lisp_Object bt1, Lisp_Object bt2)
{
  if (VECTORP (bt1) && VECTORP (bt2))
    {
      ptrdiff_t i, l = ASIZE (bt1);
      if (l != ASIZE (bt2))
        return false;
      for (i = 0; i < l; i++)
        if (NILP (Ffunction_equal (AREF (bt1, i), AREF (bt2, i))))
          return false;
      return true;
    }
  else
    return EQ (bt1, bt2);
}

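/* Hash function consistent with cmpfn_profiler: each frame is hashed
   via the object that `function-equal' compares, i.e. the byte-code for
   compiled functions and the shared args/body part for closures.  */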
static EMACS_UINT
hashfn_profiler (struct hash_table_test *ht, Lisp_Object bt)
{
  if (VECTORP (bt))
    {
      EMACS_UINT hash = 0;
      ptrdiff_t i, l = ASIZE (bt);
      for (i = 0; i < l; i++)
        {
          Lisp_Object f = AREF (bt, i);
          EMACS_UINT hash1
            = (COMPILEDP (f) ? XHASH (AREF (f, COMPILED_BYTECODE))
               : (CONSP (f) && CONSP (XCDR (f)) && EQ (Qclosure, XCAR (f)))
               ? XHASH (XCDR (XCDR (f))) : XHASH (f));
          hash = sxhash_combine (hash, hash1);
        }
      return SXHASH_REDUCE (hash);
    }
  else
    return XHASH (bt);
}

void
syms_of_profiler (void)
{
#include "profiler.x"

  DEFVAR_INT ("profiler-max-stack-depth", profiler_max_stack_depth,
              doc: /* Number of elements from the call-stack recorded in the log.  */);
  profiler_max_stack_depth = 16;
  DEFVAR_INT ("profiler-log-size", profiler_log_size,
              doc: /* Number of distinct call-stacks that can be recorded in a profiler log.
If the log gets full, some of the least-seen call-stacks will be evicted
to make room for new entries.  */);
  profiler_log_size = 10000;

  DEFSYM (Qautomatic_gc, "Automatic GC");
  DEFSYM (Qprofiler_backtrace_equal, "profiler-backtrace-equal");

  hashtest_profiler.name = Qprofiler_backtrace_equal;
  hashtest_profiler.user_hash_function = Qnil;
  hashtest_profiler.user_cmp_function = Qnil;
  hashtest_profiler.cmpfn = cmpfn_profiler;
  hashtest_profiler.hashfn = hashfn_profiler;

#ifdef PROFILER_CPU_SUPPORT
  profiler_cpu_running = NOT_RUNNING;
  cpu_log = Qnil;
  staticpro (&cpu_log);
#endif
  profiler_memory_running = false;
  memory_log = Qnil;
  staticpro (&memory_log);
}