aboutsummaryrefslogtreecommitdiffstats
path: root/src
diff options
context:
space:
mode:
authorEli Zaretskii2023-03-10 14:54:52 -0500
committerStefan Monnier2023-03-10 14:54:52 -0500
commit9a5f2ac97ecd5f434690f04ed0b6573d2dd58148 (patch)
treeca954e389a06235afaf82aabca342afcbac39608 /src
parentd5d2959217f7afc99f2636cafdb8ffe00e14dfae (diff)
downloademacs-9a5f2ac97ecd5f434690f04ed0b6573d2dd58148.tar.gz
emacs-9a5f2ac97ecd5f434690f04ed0b6573d2dd58148.zip
src/profiler.c: Keep track of allocations during GC
Cargo-cult the `cpu_gc_count` code to `memory_gc_count`. * src/profiler.c (mem_gc_count): New var. (Fprofiler_memory_start): Initialize it. (malloc_probe): Increment it. (Fprofiler_memory_log): Use it.
Diffstat (limited to 'src')
-rw-r--r--src/profiler.c29
1 file changed, 24 insertions, 5 deletions
diff --git a/src/profiler.c b/src/profiler.c
index 8247b2e90c6..92d8a0aea1c 100644
--- a/src/profiler.c
+++ b/src/profiler.c
@@ -227,6 +227,9 @@ static Lisp_Object cpu_log;
227/* Separate counter for the time spent in the GC. */ 227/* Separate counter for the time spent in the GC. */
228static EMACS_INT cpu_gc_count; 228static EMACS_INT cpu_gc_count;
229 229
230/* Separate counter for the memory allocations during GC. */
231static EMACS_INT mem_gc_count;
232
230/* The current sampling interval in nanoseconds. */ 233/* The current sampling interval in nanoseconds. */
231static EMACS_INT current_sampling_interval; 234static EMACS_INT current_sampling_interval;
232 235
@@ -451,7 +454,10 @@ See also `profiler-log-size' and `profiler-max-stack-depth'. */)
451 error ("Memory profiler is already running"); 454 error ("Memory profiler is already running");
452 455
453 if (NILP (memory_log)) 456 if (NILP (memory_log))
454 memory_log = make_log (); 457 {
458 mem_gc_count = 0;
459 memory_log = make_log ();
460 }
455 461
456 profiler_memory_running = true; 462 profiler_memory_running = true;
457 463
@@ -495,6 +501,10 @@ Before returning, a new log is allocated for future samples. */)
495 more for our use afterwards since we can't rely on its special 501 more for our use afterwards since we can't rely on its special
496 pre-allocated keys anymore. So we have to allocate a new one. */ 502 pre-allocated keys anymore. So we have to allocate a new one. */
497 memory_log = profiler_memory_running ? make_log () : Qnil; 503 memory_log = profiler_memory_running ? make_log () : Qnil;
504 Fputhash (make_vector (1, QAutomatic_GC),
505 make_fixnum (mem_gc_count),
506 result);
507 mem_gc_count = 0;
498 return result; 508 return result;
499} 509}
500 510
@@ -506,10 +516,19 @@ void
506malloc_probe (size_t size) 516malloc_probe (size_t size)
507{ 517{
508 if (EQ (backtrace_top_function (), QAutomatic_GC)) /* bug#60237 */ 518 if (EQ (backtrace_top_function (), QAutomatic_GC)) /* bug#60237 */
509 /* FIXME: We should do something like what we did with `cpu_gc_count`. */ 519 /* Special case the malloc-count inside GC because the hash-table
510 return; 520 code is not prepared to be used while the GC is running.
511 eassert (HASH_TABLE_P (memory_log)); 521 More specifically it uses ASIZE at many places where it does
512 record_backtrace (XHASH_TABLE (memory_log), min (size, MOST_POSITIVE_FIXNUM)); 522 not expect the ARRAY_MARK_FLAG to be set. We could try and
523 harden the hash-table code, but it doesn't seem worth the
524 effort. */
525 mem_gc_count = saturated_add (mem_gc_count, 1);
526 else
527 {
528 eassert (HASH_TABLE_P (memory_log));
529 record_backtrace (XHASH_TABLE (memory_log),
530 min (size, MOST_POSITIVE_FIXNUM));
531 }
513} 532}
514 533
515DEFUN ("function-equal", Ffunction_equal, Sfunction_equal, 2, 2, 0, 534DEFUN ("function-equal", Ffunction_equal, Sfunction_equal, 2, 2, 0,