author      Po Lu  2023-03-11 07:53:45 +0800
committer   Po Lu  2023-03-11 07:53:45 +0800
commit      769a4e7ff51c370b055a964ae96e2ace43fc1936 (patch)
tree        c067ea5aa92eeb31af2be075f7800932cbe43ec1 /src
parent      1eb546309b24f41b124a0f94aee4009c6dbd8580 (diff)
parent      d236ab09300070696f21ebfda49678b11c2327eb (diff)
Merge remote-tracking branch 'origin/master' into feature/android
Diffstat (limited to 'src')
-rw-r--r--   src/profiler.c   145
 1 file changed, 79 insertions(+), 66 deletions(-)
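
Note: the change merged from master (d236ab0) folds the former cpu_log /
cpu_gc_count globals into a struct profiler_log that carries the hash table
together with two saturating counters, so samples taken during GC and samples
evicted on hash-table overflow are accounted for instead of being dropped on
the floor. Below is a minimal, self-contained C sketch of that bookkeeping
pattern; the names here (sample_log, sat_add, evict) are illustrative
stand-ins, not the Emacs APIs.

#include <stdio.h>
#include <limits.h>

/* Stand-in for Emacs's saturated_add: clamp at LONG_MAX instead of
   overflowing.  Assumes both operands are non-negative.  */
static long
sat_add (long a, long b)
{
  return a > LONG_MAX - b ? LONG_MAX : a + b;
}

/* Analogue of struct profiler_log: the sample table plus counters for
   samples that cannot live in the table itself.  */
struct sample_log
{
  long table[4];    /* toy fixed-size table of per-backtrace counts */
  long gc_count;    /* samples taken while GC was running */
  long discarded;   /* samples evicted to make room for new entries */
};

/* Analogue of the new evict_lower_half behavior: before dropping an
   entry, fold its count into `discarded' so no samples vanish.  */
static void
evict (struct sample_log *log, int slot)
{
  log->discarded = sat_add (log->discarded, log->table[slot]);
  log->table[slot] = 0;
}

int
main (void)
{
  struct sample_log log = { { 5, 1, 7, 2 }, 0, 0 };
  log.gc_count = sat_add (log.gc_count, 3);  /* like add_sample during GC */
  evict (&log, 1);
  evict (&log, 3);
  printf ("gc=%ld discarded=%ld\n", log.gc_count, log.discarded);
  return 0;
}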
diff --git a/src/profiler.c b/src/profiler.c
index 8247b2e90c6..6217071ef9c 100644
--- a/src/profiler.c
+++ b/src/profiler.c
@@ -49,7 +49,13 @@ static const struct hash_table_test hashtest_profiler =
     hashfn_profiler,
   };
 
-static Lisp_Object
+struct profiler_log {
+  Lisp_Object log;
+  EMACS_INT gc_count;    /* Samples taken during GC.  */
+  EMACS_INT discarded;   /* Samples evicted during table overflow.  */
+};
+
+static struct profiler_log
 make_log (void)
 {
   /* We use a standard Elisp hash-table object, but we use it in
@@ -60,11 +66,13 @@ make_log (void)
     = clip_to_bounds (0, profiler_log_size, MOST_POSITIVE_FIXNUM);
   ptrdiff_t max_stack_depth
     = clip_to_bounds (0, profiler_max_stack_depth, PTRDIFF_MAX);;
-  Lisp_Object log = make_hash_table (hashtest_profiler, heap_size,
-                                     DEFAULT_REHASH_SIZE,
-                                     DEFAULT_REHASH_THRESHOLD,
-                                     Qnil, false);
-  struct Lisp_Hash_Table *h = XHASH_TABLE (log);
+  struct profiler_log log
+    = { make_hash_table (hashtest_profiler, heap_size,
+                         DEFAULT_REHASH_SIZE,
+                         DEFAULT_REHASH_THRESHOLD,
+                         Qnil, false),
+        0, 0 };
+  struct Lisp_Hash_Table *h = XHASH_TABLE (log.log);
 
   /* What is special about our hash-tables is that the values are pre-filled
      with the vectors we'll use as keys.  */
@@ -116,8 +124,9 @@ static EMACS_INT approximate_median (log_t *log,
     }
 }
 
-static void evict_lower_half (log_t *log)
+static void evict_lower_half (struct profiler_log *plog)
 {
+  log_t *log = XHASH_TABLE (plog->log);
   ptrdiff_t size = ASIZE (log->key_and_value) / 2;
   EMACS_INT median = approximate_median (log, 0, size);
 
@@ -127,6 +136,8 @@ static void evict_lower_half (log_t *log)
     if (XFIXNUM (HASH_VALUE (log, i)) <= median)
       {
         Lisp_Object key = HASH_KEY (log, i);
+        EMACS_INT count = XFIXNUM (HASH_VALUE (log, i));
+        plog->discarded = saturated_add (plog->discarded, count);
         { /* FIXME: we could make this more efficient.  */
           Lisp_Object tmp;
           XSET_HASH_TABLE (tmp, log); /* FIXME: Use make_lisp_ptr.  */
@@ -148,12 +159,12 @@ static void evict_lower_half (log_t *log)
    size for memory.  */
 
 static void
-record_backtrace (log_t *log, EMACS_INT count)
+record_backtrace (struct profiler_log *plog, EMACS_INT count)
 {
+  eassert (HASH_TABLE_P (plog->log));
+  log_t *log = XHASH_TABLE (plog->log);
   if (log->next_free < 0)
-    /* FIXME: transfer the evicted counts to a special entry rather
-       than dropping them on the floor.  */
-    evict_lower_half (log);
+    evict_lower_half (plog);
   ptrdiff_t index = log->next_free;
 
   /* Get a "working memory" vector.  */
@@ -222,10 +233,10 @@ static enum profiler_cpu_running
   profiler_cpu_running;
 
 /* Hash-table log of CPU profiler.  */
-static Lisp_Object cpu_log;
+static struct profiler_log cpu;
 
-/* Separate counter for the time spent in the GC.  */
-static EMACS_INT cpu_gc_count;
+/* Hash-table log of Memory profiler.  */
+static struct profiler_log memory;
 
 /* The current sampling interval in nanoseconds.  */
 static EMACS_INT current_sampling_interval;
@@ -233,30 +244,34 @@ static EMACS_INT current_sampling_interval;
 /* Signal handler for sampling profiler.  */
 
 static void
-handle_profiler_signal (int signal)
+add_sample (struct profiler_log *plog, EMACS_INT count)
 {
-  if (EQ (backtrace_top_function (), QAutomatic_GC))
+  if (EQ (backtrace_top_function (), QAutomatic_GC)) /* bug#60237 */
     /* Special case the time-count inside GC because the hash-table
        code is not prepared to be used while the GC is running.
        More specifically it uses ASIZE at many places where it does
        not expect the ARRAY_MARK_FLAG to be set.  We could try and
        harden the hash-table code, but it doesn't seem worth the
        effort.  */
-    cpu_gc_count = saturated_add (cpu_gc_count, 1);
+    plog->gc_count = saturated_add (plog->gc_count, count);
   else
-    {
-      EMACS_INT count = 1;
+    record_backtrace (plog, count);
+}
+
+
+static void
+handle_profiler_signal (int signal)
+{
+  EMACS_INT count = 1;
 #if defined HAVE_ITIMERSPEC && defined HAVE_TIMER_GETOVERRUN
-      if (profiler_timer_ok)
-        {
-          int overruns = timer_getoverrun (profiler_timer);
-          eassert (overruns >= 0);
-          count += overruns;
-        }
-#endif
-      eassert (HASH_TABLE_P (cpu_log));
-      record_backtrace (XHASH_TABLE (cpu_log), count);
+  if (profiler_timer_ok)
+    {
+      int overruns = timer_getoverrun (profiler_timer);
+      eassert (overruns >= 0);
+      count += overruns;
     }
+#endif
+  add_sample (&cpu, count);
 }
 
 static void
@@ -343,11 +358,8 @@ See also `profiler-log-size' and `profiler-max-stack-depth'. */)
   if (profiler_cpu_running)
     error ("CPU profiler is already running");
 
-  if (NILP (cpu_log))
-    {
-      cpu_gc_count = 0;
-      cpu_log = make_log ();
-    }
+  if (NILP (cpu.log))
+    cpu = make_log ();
 
   int status = setup_cpu_timer (sampling_interval);
   if (status < 0)
@@ -409,6 +421,26 @@ DEFUN ("profiler-cpu-running-p",
   return profiler_cpu_running ? Qt : Qnil;
 }
 
+static Lisp_Object
+export_log (struct profiler_log *log)
+{
+  Lisp_Object result = log->log;
+  if (log->gc_count)
+    Fputhash (CALLN (Fvector, QAutomatic_GC, Qnil),
+              make_fixnum (log->gc_count),
+              result);
+  if (log->discarded)
+    Fputhash (CALLN (Fvector, QDiscarded_Samples, Qnil),
+              make_fixnum (log->discarded),
+              result);
+  /* Here we're making the log visible to Elisp, so it's not safe any
+     more for our use afterwards since we can't rely on its special
+     pre-allocated keys anymore.  So we have to allocate a new one.  */
+  if (profiler_cpu_running)
+    *log = make_log ();
+  return result;
+}
+
 DEFUN ("profiler-cpu-log", Fprofiler_cpu_log, Sprofiler_cpu_log,
        0, 0, 0,
        doc: /* Return the current cpu profiler log.
@@ -418,16 +450,7 @@ of functions, where the last few elements may be nil.
 Before returning, a new log is allocated for future samples.  */)
   (void)
 {
-  Lisp_Object result = cpu_log;
-  /* Here we're making the log visible to Elisp, so it's not safe any
-     more for our use afterwards since we can't rely on its special
-     pre-allocated keys anymore.  So we have to allocate a new one.  */
-  cpu_log = profiler_cpu_running ? make_log () : Qnil;
-  Fputhash (make_vector (1, QAutomatic_GC),
-            make_fixnum (cpu_gc_count),
-            result);
-  cpu_gc_count = 0;
-  return result;
+  return (export_log (&cpu));
 }
 #endif /* PROFILER_CPU_SUPPORT */
 
@@ -436,8 +459,6 @@ Before returning, a new log is allocated for future samples. */)
 /* True if memory profiler is running.  */
 bool profiler_memory_running;
 
-static Lisp_Object memory_log;
-
 DEFUN ("profiler-memory-start", Fprofiler_memory_start, Sprofiler_memory_start,
        0, 0, 0,
        doc: /* Start/restart the memory profiler.
@@ -450,8 +471,8 @@ See also `profiler-log-size' and `profiler-max-stack-depth'. */)
   if (profiler_memory_running)
     error ("Memory profiler is already running");
 
-  if (NILP (memory_log))
-    memory_log = make_log ();
+  if (NILP (memory.log))
+    memory = make_log ();
 
   profiler_memory_running = true;
 
@@ -490,12 +511,7 @@ of functions, where the last few elements may be nil.
 Before returning, a new log is allocated for future samples.  */)
   (void)
 {
-  Lisp_Object result = memory_log;
-  /* Here we're making the log visible to Elisp, so it's not safe any
-     more for our use afterwards since we can't rely on its special
-     pre-allocated keys anymore.  So we have to allocate a new one.  */
-  memory_log = profiler_memory_running ? make_log () : Qnil;
-  return result;
+  return (export_log (&memory));
 }
 
 
@@ -505,11 +521,7 @@ Before returning, a new log is allocated for future samples. */)
 void
 malloc_probe (size_t size)
 {
-  if (EQ (backtrace_top_function (), QAutomatic_GC)) /* bug#60237 */
-    /* FIXME: We should do something like what we did with `cpu_gc_count`.  */
-    return;
-  eassert (HASH_TABLE_P (memory_log));
-  record_backtrace (XHASH_TABLE (memory_log), min (size, MOST_POSITIVE_FIXNUM));
+  add_sample (&memory, min (size, MOST_POSITIVE_FIXNUM));
 }
 
 DEFUN ("function-equal", Ffunction_equal, Sfunction_equal, 2, 2, 0,
@@ -589,21 +601,22 @@ to make room for new entries. */);
   profiler_log_size = 10000;
 
   DEFSYM (Qprofiler_backtrace_equal, "profiler-backtrace-equal");
+  DEFSYM (QDiscarded_Samples, "Discarded Samples");
 
   defsubr (&Sfunction_equal);
 
 #ifdef PROFILER_CPU_SUPPORT
   profiler_cpu_running = NOT_RUNNING;
-  cpu_log = Qnil;
-  staticpro (&cpu_log);
+  cpu.log = Qnil;
+  staticpro (&cpu.log);
   defsubr (&Sprofiler_cpu_start);
   defsubr (&Sprofiler_cpu_stop);
   defsubr (&Sprofiler_cpu_running_p);
   defsubr (&Sprofiler_cpu_log);
 #endif
   profiler_memory_running = false;
-  memory_log = Qnil;
-  staticpro (&memory_log);
+  memory.log = Qnil;
+  staticpro (&memory.log);
   defsubr (&Sprofiler_memory_start);
   defsubr (&Sprofiler_memory_stop);
   defsubr (&Sprofiler_memory_running_p);
@@ -618,16 +631,16 @@ syms_of_profiler_for_pdumper (void)
   if (dumped_with_pdumper_p ())
     {
 #ifdef PROFILER_CPU_SUPPORT
-      cpu_log = Qnil;
+      cpu.log = Qnil;
 #endif
-      memory_log = Qnil;
+      memory.log = Qnil;
     }
   else
     {
 #ifdef PROFILER_CPU_SUPPORT
-      eassert (NILP (cpu_log));
+      eassert (NILP (cpu.log));
 #endif
-      eassert (NILP (memory_log));
+      eassert (NILP (memory.log));
     }
 
 }
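
The other half of the pattern is export_log above: when a log is handed to
Lisp, the side counters are published into the outgoing table under reserved
keys ("Automatic GC", "Discarded Samples") and a fresh log is installed if the
profiler is still running, since the exported table's pre-allocated keys can
no longer be trusted. A hedged sketch of that hand-off follows; toy_snapshot,
toy_log, fresh_log, and export_toy_log are hypothetical stand-ins for the
Lisp hash-table machinery.

#include <stdio.h>
#include <string.h>

/* Toy snapshot type standing in for the exported Lisp hash table.  */
struct toy_snapshot { long samples[4]; long gc; long discarded; };
/* Toy log standing in for struct profiler_log.  */
struct toy_log { struct toy_snapshot table; long gc_count; long discarded; };

static struct toy_log
fresh_log (void)
{
  struct toy_log log;
  memset (&log, 0, sizeof log);
  return log;
}

/* Mirror of export_log's hand-off: fold the counters into the outgoing
   snapshot, then reinstall a fresh log so the profiler never mutates a
   table the caller can now see.  */
static struct toy_snapshot
export_toy_log (struct toy_log *log, int still_running)
{
  struct toy_snapshot result = log->table;
  result.gc = log->gc_count;          /* like the "Automatic GC" key */
  result.discarded = log->discarded;  /* like "Discarded Samples" */
  if (still_running)
    *log = fresh_log ();
  return result;
}

int
main (void)
{
  struct toy_log log = { { { 4, 0, 9, 1 }, 0, 0 }, 12, 7 };
  struct toy_snapshot snap = export_toy_log (&log, 1);
  printf ("exported gc=%ld discarded=%ld; live gc now %ld\n",
          snap.gc, snap.discarded, log.gc_count);
  return 0;
}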