Diffstat (limited to 'src/alloc.c')
-rw-r--r--  src/alloc.c  200
1 file changed, 93 insertions(+), 107 deletions(-)
diff --git a/src/alloc.c b/src/alloc.c
index 2d490f3bb75..9fbd0d05739 100644
--- a/src/alloc.c
+++ b/src/alloc.c
@@ -224,7 +224,7 @@ struct emacs_globals globals;
 
 /* maybe_gc collects garbage if this goes negative.  */
 
-intmax_t consing_until_gc;
+EMACS_INT consing_until_gc;
 
 #ifdef HAVE_PDUMPER
 /* Number of finalizers run: used to loop over GC until we stop
@@ -238,10 +238,17 @@ bool gc_in_progress;
 
 /* System byte and object counts reported by GC.  */
 
+/* Assume byte counts fit in uintptr_t and object counts fit into
+   intptr_t.  */
 typedef uintptr_t byte_ct;
 typedef intptr_t object_ct;
 
-/* Number of live and free conses etc.  */
+/* Large-magnitude value for a threshold count, which fits in EMACS_INT.
+   Using only half the EMACS_INT range avoids overflow hassles.
+   There is no need to fit these counts into fixnums.  */
+#define HI_THRESHOLD (EMACS_INT_MAX / 2)
+
+/* Number of live and free conses etc. counted by the most-recent GC.  */
 
 static struct gcstat
 {
@@ -299,7 +306,7 @@ static intptr_t garbage_collection_inhibited;
 
 /* The GC threshold in bytes, the last time it was calculated
    from gc-cons-threshold and gc-cons-percentage.  */
-static intmax_t gc_threshold;
+static EMACS_INT gc_threshold;
 
 /* If nonzero, this is a warning delivered by malloc and not yet
    displayed.  */
@@ -536,6 +543,15 @@ XFLOAT_INIT (Lisp_Object f, double n)
   XFLOAT (f)->u.data = n;
 }
 
+/* Account for allocation of NBYTES in the heap.  This is a separate
+   function to avoid hassles with implementation-defined conversion
+   from unsigned to signed types.  */
+static void
+tally_consing (ptrdiff_t nbytes)
+{
+  consing_until_gc -= nbytes;
+}
+
 #ifdef DOUG_LEA_MALLOC
 static bool
 pointers_fit_in_lispobj_p (void)
@@ -560,7 +576,7 @@ struct Lisp_Finalizer finalizers;
 
 /* Head of a circularly-linked list of finalizers that must be invoked
    because we deemed them unreachable.  This list must be global, and
-   not a local inside garbage_collect_1, in case we GC again while
+   not a local inside garbage_collect, in case we GC again while
    running finalizers.  */
 struct Lisp_Finalizer doomed_finalizers;
 
@@ -1366,16 +1382,14 @@ make_interval (void)
           newi->next = interval_block;
           interval_block = newi;
           interval_block_index = 0;
-          gcstat.total_free_intervals += INTERVAL_BLOCK_SIZE;
         }
       val = &interval_block->intervals[interval_block_index++];
     }
 
   MALLOC_UNBLOCK_INPUT;
 
-  consing_until_gc -= sizeof (struct interval);
+  tally_consing (sizeof (struct interval));
   intervals_consed++;
-  gcstat.total_free_intervals--;
   RESET_INTERVAL (val);
   val->gcmarkbit = 0;
   return val;
@@ -1730,8 +1744,6 @@ allocate_string (void)
           NEXT_FREE_LISP_STRING (s) = string_free_list;
           string_free_list = ptr_bounds_clip (s, sizeof *s);
         }
-
-      gcstat.total_free_strings += STRING_BLOCK_SIZE;
     }
 
   check_string_free_list ();
@@ -1742,10 +1754,8 @@ allocate_string (void)
 
   MALLOC_UNBLOCK_INPUT;
 
-  gcstat.total_free_strings--;
-  gcstat.total_strings++;
   ++strings_consed;
-  consing_until_gc -= sizeof *s;
+  tally_consing (sizeof *s);
 
 #ifdef GC_CHECK_STRING_BYTES
   if (!noninteractive)
@@ -1865,7 +1875,7 @@ allocate_string_data (struct Lisp_String *s,
       old_data->string = NULL;
     }
 
-  consing_until_gc -= needed;
+  tally_consing (needed);
 }
 
 
@@ -2461,7 +2471,6 @@ make_float (double float_value)
           memset (new->gcmarkbits, 0, sizeof new->gcmarkbits);
           float_block = new;
           float_block_index = 0;
-          gcstat.total_free_floats += FLOAT_BLOCK_SIZE;
         }
       XSETFLOAT (val, &float_block->floats[float_block_index]);
       float_block_index++;
@@ -2471,9 +2480,8 @@ make_float (double float_value)
 
   XFLOAT_INIT (val, float_value);
   eassert (!XFLOAT_MARKED_P (XFLOAT (val)));
-  consing_until_gc -= sizeof (struct Lisp_Float);
+  tally_consing (sizeof (struct Lisp_Float));
   floats_consed++;
-  gcstat.total_free_floats--;
   return val;
 }
 
@@ -2543,9 +2551,8 @@ free_cons (struct Lisp_Cons *ptr)
   ptr->u.s.u.chain = cons_free_list;
   ptr->u.s.car = dead_object ();
   cons_free_list = ptr;
-  if (INT_ADD_WRAPV (consing_until_gc, sizeof *ptr, &consing_until_gc))
-    consing_until_gc = INTMAX_MAX;
-  gcstat.total_free_conses++;
+  ptrdiff_t nbytes = sizeof *ptr;
+  tally_consing (-nbytes);
 }
 
 DEFUN ("cons", Fcons, Scons, 2, 2, 0,
@@ -2565,26 +2572,12 @@ DEFUN ("cons", Fcons, Scons, 2, 2, 0,
     {
       if (cons_block_index == CONS_BLOCK_SIZE)
         {
-          /* Maximum number of conses that should be active at any
-             given time, so that list lengths fit into a ptrdiff_t and
-             into a fixnum.  */
-          ptrdiff_t max_conses = min (PTRDIFF_MAX, MOST_POSITIVE_FIXNUM);
-
-          /* This check is typically optimized away, as a runtime
-             check is needed only on weird platforms where a count of
-             distinct conses might not fit.  */
-          if (max_conses < INTPTR_MAX / sizeof (struct Lisp_Cons)
-              && (max_conses - CONS_BLOCK_SIZE
-                  < gcstat.total_free_conses + gcstat.total_conses))
-            memory_full (sizeof (struct cons_block));
-
           struct cons_block *new
             = lisp_align_malloc (sizeof *new, MEM_TYPE_CONS);
           memset (new->gcmarkbits, 0, sizeof new->gcmarkbits);
           new->next = cons_block;
           cons_block = new;
           cons_block_index = 0;
-          gcstat.total_free_conses += CONS_BLOCK_SIZE;
         }
       XSETCONS (val, &cons_block->conses[cons_block_index]);
       cons_block_index++;
@@ -2596,7 +2589,6 @@ DEFUN ("cons", Fcons, Scons, 2, 2, 0,
   XSETCDR (val, cdr);
   eassert (!XCONS_MARKED_P (XCONS (val)));
   consing_until_gc -= sizeof (struct Lisp_Cons);
-  gcstat.total_free_conses--;
   cons_cells_consed++;
   return val;
 }
@@ -2855,7 +2847,6 @@ setup_on_free_list (struct Lisp_Vector *v, ptrdiff_t nbytes)
   eassert (vindex < VECTOR_MAX_FREE_LIST_INDEX);
   set_next_vector (v, vector_free_lists[vindex]);
   vector_free_lists[vindex] = v;
-  gcstat.total_free_vector_slots += nbytes / word_size;
 }
 
 /* Get a new vector block.  */
@@ -2903,7 +2894,6 @@ allocate_vector_from_block (ptrdiff_t nbytes)
     {
       vector = vector_free_lists[index];
       vector_free_lists[index] = next_vector (vector);
-      gcstat.total_free_vector_slots -= nbytes / word_size;
       return vector;
     }
 
@@ -2917,7 +2907,6 @@ allocate_vector_from_block (ptrdiff_t nbytes)
       /* This vector is larger than requested.  */
       vector = vector_free_lists[index];
       vector_free_lists[index] = next_vector (vector);
-      gcstat.total_free_vector_slots -= nbytes / word_size;
 
       /* Excess bytes are used for the smaller vector,
          which should be set on an appropriate free list.  */
@@ -3092,7 +3081,10 @@ sweep_vectors (void)
                  space was coalesced into the only free vector.  */
               free_this_block = true;
             else
-              setup_on_free_list (vector, total_bytes);
+              {
+                setup_on_free_list (vector, total_bytes);
+                gcstat.total_free_vector_slots += total_bytes / word_size;
+              }
           }
       }
 
@@ -3177,7 +3169,7 @@ allocate_vectorlike (ptrdiff_t len)
   if (find_suspicious_object_in_range (p, (char *) p + nbytes))
     emacs_abort ();
 
-  consing_until_gc -= nbytes;
+  tally_consing (nbytes);
   vector_cells_consed += len;
 
   MALLOC_UNBLOCK_INPUT;
@@ -3454,7 +3446,6 @@ Its value is void, and its function definition and property list are nil. */)
           new->next = symbol_block;
           symbol_block = new;
           symbol_block_index = 0;
-          gcstat.total_free_symbols += SYMBOL_BLOCK_SIZE;
         }
       XSETSYMBOL (val, &symbol_block->symbols[symbol_block_index]);
       symbol_block_index++;
@@ -3463,9 +3454,8 @@ Its value is void, and its function definition and property list are nil. */)
   MALLOC_UNBLOCK_INPUT;
 
   init_symbol (val, name);
-  consing_until_gc -= sizeof (struct Lisp_Symbol);
+  tally_consing (sizeof (struct Lisp_Symbol));
   symbols_consed++;
-  gcstat.total_free_symbols--;
   return val;
 }
 
@@ -5503,7 +5493,7 @@ staticpro (Lisp_Object const *varaddress)
 static void
 allow_garbage_collection (intmax_t consing)
 {
-  consing_until_gc = consing - (INTMAX_MAX - consing_until_gc);
+  consing_until_gc = consing - (HI_THRESHOLD - consing_until_gc);
   garbage_collection_inhibited--;
 }
 
@@ -5513,7 +5503,7 @@ inhibit_garbage_collection (void)
   ptrdiff_t count = SPECPDL_INDEX ();
   record_unwind_protect_intmax (allow_garbage_collection, consing_until_gc);
   garbage_collection_inhibited++;
-  consing_until_gc = INTMAX_MAX;
+  consing_until_gc = HI_THRESHOLD;
   return count;
 }
 
@@ -5723,7 +5713,7 @@ visit_buffer_root (struct gc_root_visitor visitor,
 
    There are other GC roots of course, but these roots are dynamic
    runtime data structures that pdump doesn't care about and so we can
-   continue to mark those directly in garbage_collect_1.  */
+   continue to mark those directly in garbage_collect.  */
 void
 visit_static_gc_roots (struct gc_root_visitor visitor)
 {
@@ -5753,8 +5743,7 @@ mark_object_root_visitor (Lisp_Object const *root_ptr,
 }
 
 /* List of weak hash tables we found during marking the Lisp heap.
-   Will be NULL on entry to garbage_collect_1 and after it
-   returns.  */
+   NULL on entry to garbage_collect and after it returns.  */
 static struct Lisp_Hash_Table *weak_hash_tables;
 
 NO_INLINE /* For better stack traces */
@@ -5788,11 +5777,13 @@ mark_and_sweep_weak_table_contents (void)
     }
 }
 
-/* Return the number of bytes to cons between GCs, assuming
-   gc-cons-threshold is THRESHOLD and gc-cons-percentage is
-   PERCENTAGE.  */
-static intmax_t
-consing_threshold (intmax_t threshold, Lisp_Object percentage)
+/* Return the number of bytes to cons between GCs, given THRESHOLD and
+   PERCENTAGE.  When calculating a threshold based on PERCENTAGE,
+   assume SINCE_GC bytes have been allocated since the most recent GC.
+   The returned value is positive and no greater than HI_THRESHOLD.  */
+static EMACS_INT
+consing_threshold (intmax_t threshold, Lisp_Object percentage,
+                   intmax_t since_gc)
 {
   if (!NILP (Vmemory_full))
     return memory_full_cons_threshold;
@@ -5802,42 +5793,33 @@ consing_threshold (intmax_t threshold, Lisp_Object percentage)
       if (FLOATP (percentage))
         {
           double tot = (XFLOAT_DATA (percentage)
-                        * total_bytes_of_live_objects ());
+                        * (total_bytes_of_live_objects () + since_gc));
           if (threshold < tot)
             {
-              if (tot < INTMAX_MAX)
-                threshold = tot;
+              if (tot < HI_THRESHOLD)
+                return tot;
               else
-                threshold = INTMAX_MAX;
+                return HI_THRESHOLD;
             }
         }
-      return threshold;
+      return min (threshold, HI_THRESHOLD);
     }
 }
 
-/* Adjust consing_until_gc, assuming gc-cons-threshold is THRESHOLD and
-   gc-cons-percentage is PERCENTAGE.  */
-static Lisp_Object
+/* Adjust consing_until_gc and gc_threshold, given THRESHOLD and PERCENTAGE.
+   Return the updated consing_until_gc.  */
+
+static EMACS_INT
 bump_consing_until_gc (intmax_t threshold, Lisp_Object percentage)
 {
-  /* If consing_until_gc is negative leave it alone, since this prevents
-     negative integer overflow and a GC would have been done soon anyway.  */
-  if (0 <= consing_until_gc)
-    {
-      threshold = consing_threshold (threshold, percentage);
-      intmax_t sum;
-      if (INT_ADD_WRAPV (consing_until_gc, threshold - gc_threshold, &sum))
-        {
-          /* Scale the threshold down so that consing_until_gc does
-             not overflow.  */
-          sum = INTMAX_MAX;
-          threshold = INTMAX_MAX - consing_until_gc + gc_threshold;
-        }
-      consing_until_gc = sum;
-      gc_threshold = threshold;
-    }
-
-  return Qnil;
+  /* Guesstimate that half the bytes allocated since the most
+     recent GC are still in use.  */
+  EMACS_INT since_gc = (gc_threshold - consing_until_gc) >> 1;
+  EMACS_INT new_gc_threshold = consing_threshold (threshold, percentage,
+                                                  since_gc);
+  consing_until_gc += new_gc_threshold - gc_threshold;
+  gc_threshold = new_gc_threshold;
+  return consing_until_gc;
 }
 
 /* Watch changes to gc-cons-threshold.  */
@@ -5848,7 +5830,8 @@ watch_gc_cons_threshold (Lisp_Object symbol, Lisp_Object newval,
   intmax_t threshold;
   if (! (INTEGERP (newval) && integer_to_intmax (newval, &threshold)))
     return Qnil;
-  return bump_consing_until_gc (threshold, Vgc_cons_percentage);
+  bump_consing_until_gc (threshold, Vgc_cons_percentage);
+  return Qnil;
 }
 
 /* Watch changes to gc-cons-percentage.  */
@@ -5856,24 +5839,34 @@ static Lisp_Object
 watch_gc_cons_percentage (Lisp_Object symbol, Lisp_Object newval,
                           Lisp_Object operation, Lisp_Object where)
 {
-  return bump_consing_until_gc (gc_cons_threshold, newval);
+  bump_consing_until_gc (gc_cons_threshold, newval);
+  return Qnil;
+}
+
+/* It may be time to collect garbage.  Recalculate consing_until_gc,
+   since it might depend on current usage, and do the garbage
+   collection if the recalculation says so.  */
+void
+maybe_garbage_collect (void)
+{
+  if (bump_consing_until_gc (gc_cons_threshold, Vgc_cons_percentage) < 0)
+    garbage_collect ();
 }
 
 /* Subroutine of Fgarbage_collect that does most of the work.  */
-static bool
-garbage_collect_1 (struct gcstat *gcst)
+void
+garbage_collect (void)
 {
   struct buffer *nextb;
   char stack_top_variable;
   bool message_p;
   ptrdiff_t count = SPECPDL_INDEX ();
   struct timespec start;
-  byte_ct tot_before = 0;
 
   eassert (weak_hash_tables == NULL);
 
   if (garbage_collection_inhibited)
-    return false;
+    return;
 
   /* Record this function, so it appears on the profiler's backtraces.  */
   record_in_backtrace (QAutomatic_GC, 0, 0);
@@ -5883,14 +5876,15 @@ garbage_collect_1 (struct gcstat *gcst)
   FOR_EACH_BUFFER (nextb)
     compact_buffer (nextb);
 
-  if (profiler_memory_running)
-    tot_before = total_bytes_of_live_objects ();
+  byte_ct tot_before = (profiler_memory_running
+                        ? total_bytes_of_live_objects ()
+                        : (byte_ct) -1);
 
   start = current_timespec ();
 
   /* In case user calls debug_print during GC,
      don't let that cause a recursive GC.  */
-  consing_until_gc = INTMAX_MAX;
+  consing_until_gc = HI_THRESHOLD;
 
   /* Save what's currently displayed in the echo area.  Don't do that
      if we are GC'ing because we've run out of memory, since
@@ -6002,7 +5996,7 @@ garbage_collect_1 (struct gcstat *gcst)
   unblock_input ();
 
   consing_until_gc = gc_threshold
-    = consing_threshold (gc_cons_threshold, Vgc_cons_percentage);
+    = consing_threshold (gc_cons_threshold, Vgc_cons_percentage, 0);
 
   if (garbage_collection_messages && NILP (Vmemory_full))
     {
@@ -6014,8 +6008,6 @@ garbage_collect_1 (struct gcstat *gcst)
 
   unbind_to (count, Qnil);
 
-  *gcst = gcstat;
-
   /* GC is complete: now we can run our finalizer callbacks.  */
   run_finalizers (&doomed_finalizers);
 
@@ -6029,29 +6021,21 @@ garbage_collect_1 (struct gcstat *gcst)
   /* Accumulate statistics.  */
   if (FLOATP (Vgc_elapsed))
     {
-      struct timespec since_start = timespec_sub (current_timespec (), start);
-      Vgc_elapsed = make_float (XFLOAT_DATA (Vgc_elapsed)
-                                + timespectod (since_start));
+      static struct timespec gc_elapsed;
+      gc_elapsed = timespec_add (gc_elapsed,
+                                 timespec_sub (current_timespec (), start));
+      Vgc_elapsed = make_float (timespectod (gc_elapsed));
     }
 
   gcs_done++;
 
   /* Collect profiling data.  */
-  if (profiler_memory_running)
+  if (tot_before != (byte_ct) -1)
     {
       byte_ct tot_after = total_bytes_of_live_objects ();
-      byte_ct swept = tot_before <= tot_after ? 0 : tot_before - tot_after;
-      malloc_probe (min (swept, SIZE_MAX));
+      if (tot_after < tot_before)
+        malloc_probe (min (tot_before - tot_after, SIZE_MAX));
     }
-
-  return true;
-}
-
-void
-garbage_collect (void)
-{
-  struct gcstat gcst;
-  garbage_collect_1 (&gcst);
 }
 
 DEFUN ("garbage-collect", Fgarbage_collect, Sgarbage_collect, 0, 0, "",
@@ -6071,10 +6055,12 @@ returns nil, because real GC can't be done.
 See Info node `(elisp)Garbage Collection'.  */)
   (void)
 {
-  struct gcstat gcst;
-  if (!garbage_collect_1 (&gcst))
+  if (garbage_collection_inhibited)
     return Qnil;
 
+  garbage_collect ();
+  struct gcstat gcst = gcstat;
+
   Lisp_Object total[] = {
     list4 (Qconses, make_fixnum (sizeof (struct Lisp_Cons)),
            make_int (gcst.total_conses),
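The following is a minimal standalone sketch (not Emacs code) of the accounting scheme this patch consolidates: allocations charge bytes against consing_until_gc via tally_consing, and maybe_garbage_collect triggers a collection once that budget goes negative, after which the budget is reset from the threshold. The fixed cap, the simplified threshold recomputation (the real code goes through bump_consing_until_gc and gc-cons-percentage), and the main driver loop are illustrative assumptions only.

/* gc_budget_sketch.c -- toy model of consing_until_gc accounting.  */
#include <stdio.h>
#include <stddef.h>

static long consing_until_gc;   /* bytes left before the next GC */
static long gc_threshold;       /* budget chosen at the last GC */

/* Simplified stand-in for consing_threshold: clamp to a fixed cap
   instead of gc-cons-percentage and HI_THRESHOLD.  */
static long
consing_threshold (long threshold)
{
  const long cap = 1L << 20;
  return threshold < cap ? threshold : cap;
}

/* Stand-in for garbage_collect: "collect" and reset the budget.  */
static void
garbage_collect (void)
{
  printf ("collecting garbage\n");
  consing_until_gc = gc_threshold = consing_threshold (gc_threshold);
}

/* Charge NBYTES against the budget; may go negative, checked lazily.  */
static void
tally_consing (ptrdiff_t nbytes)
{
  consing_until_gc -= nbytes;
}

/* Collect only when the budget has been spent.  */
static void
maybe_garbage_collect (void)
{
  if (consing_until_gc < 0)
    garbage_collect ();
}

int
main (void)
{
  consing_until_gc = gc_threshold = consing_threshold (4096);
  for (int i = 0; i < 10; i++)
    {
      tally_consing (1024);      /* pretend we allocated 1 KiB */
      maybe_garbage_collect ();  /* GC fires when the budget is spent */
    }
  return 0;
}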