author    Richard M. Stallman  1995-03-28 17:43:24 +0000
committer Richard M. Stallman  1995-03-28 17:43:24 +0000
commit    49f82b3d1a092f8e0d864982acaf9678e8571dd0 (patch)
tree      81591ca08c0f5ebfa44310115b108370f7e67885 /src
parent    c782bea55047f1a15ae78d07a9e2c1ad609d8929 (diff)
(r_re_alloc): Correct realloc behavior--allow shrinking of blocs
while reallocating, if shrinking by more than one page.
(relocate_blocs, resize_bloc): Added failsafe to protect against
these routines being called while in the frozen state.
(r_alloc_thaw): Added call to r_alloc_init.
(relocate_blocs, resize_bloc, r_alloc_sbrk, r_alloc_thaw): Extended
functionality to let the ralloc package work in the frozen state,
allowing for the existence of unused blocs.
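
The semantics this commit targets, in rough outline: while the arena is
frozen no bloc may move, so freeing a bloc only marks it unused
(variable == NIL), and thawing sweeps those marks away.  A minimal usage
sketch, assuming the ralloc interface as declared in src/ralloc.c
(POINTER is the package's pointer typedef; my reading is that the
argument to r_alloc_freeze pre-reserves heap space before freezing):

    POINTER p;

    r_alloc (&p, 1024);   /* relocatable bloc; p may move on relocation */
    r_alloc_freeze (0);   /* freeze: no relocation until the matching thaw */
    r_alloc_free (&p);    /* while frozen, only marks the bloc unused */
    r_alloc_thaw ();      /* freeze level hits 0: unused blocs are swept */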
Diffstat (limited to 'src')
 src/ralloc.c | 166 ++++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 140 insertions(+), 26 deletions(-)
diff --git a/src/ralloc.c b/src/ralloc.c
index d5248f2cb0a..b6fcb323395 100644
--- a/src/ralloc.c
+++ b/src/ralloc.c
@@ -166,7 +166,13 @@ static heap_ptr first_heap, last_heap;
 /* These structures are allocated in the malloc arena.
    The linked list is kept in order of increasing '.data' members.
    The data blocks abut each other; if b->next is non-nil, then
-   b->data + b->size == b->next->data.  */
+   b->data + b->size == b->next->data.
+
+   An element with variable==NIL denotes a freed block, which has not yet
+   been collected.  Such blocs may only appear while r_alloc_freeze_level > 0,
+   and will be freed when the arena is thawed.  Currently, these blocs are
+   not reusable while the arena is frozen.  Very inefficient.  */
+
 typedef struct bp
 {
   struct bp *next;
@@ -175,8 +181,7 @@ typedef struct bp
   POINTER data;
   SIZE size;
   POINTER new_data;    /* temporarily used for relocation */
-  /* Heap this bloc is in.  */
-  struct heap *heap;
+  struct heap *heap;   /* Heap this bloc is in.  */
 } *bloc_ptr;
 
 #define NIL_BLOC ((bloc_ptr) 0)
@@ -185,6 +190,11 @@ typedef struct bp
 /* Head and tail of the list of relocatable blocs. */
 static bloc_ptr first_bloc, last_bloc;
 
+static int use_relocatable_buffers;
+
+/* If >0, no relocation whatsoever takes place.  */
+static int r_alloc_freeze_level;
+
 
 /* Functions to get and return memory from the system.  */
 
@@ -451,6 +461,10 @@ relocate_blocs (bloc, heap, address)
 {
   register bloc_ptr b = bloc;
 
+  /* No need to ever call this if arena is frozen, bug somewhere!  */
+  if (r_alloc_freeze_level)
+    abort ();
+
   while (b)
     {
       /* If bloc B won't fit within HEAP,
@@ -473,7 +487,9 @@ relocate_blocs (bloc, heap, address)
 	  /* Add up the size of all the following blocs.  */
 	  while (tb != NIL_BLOC)
 	    {
-	      s += tb->size;
+	      if (tb->variable)
+		s += tb->size;
+
 	      tb = tb->next;
 	    }
 
@@ -488,7 +504,8 @@ relocate_blocs (bloc, heap, address)
       /* Record the new address of this bloc
	  and update where the next bloc can start.  */
       b->new_data = address;
-      address += b->size;
+      if (b->variable)
+	address += b->size;
       b = b->next;
     }
 
@@ -602,6 +619,10 @@ resize_bloc (bloc, size)
   POINTER address;
   SIZE old_size;
 
+  /* No need to ever call this if arena is frozen, bug somewhere!  */
+  if (r_alloc_freeze_level)
+    abort ();
+
   if (bloc == NIL_BLOC || size == bloc->size)
     return 1;
 
@@ -637,19 +658,43 @@ resize_bloc (bloc, size)
     {
       for (b = last_bloc; b != bloc; b = b->prev)
	 {
-	  safe_bcopy (b->data, b->new_data, b->size);
-	  *b->variable = b->data = b->new_data;
+	  if (!b->variable)
+	    {
+	      b->size = 0;
+	      b->data = b->new_data;
+	    }
+	  else
+	    {
+	      safe_bcopy (b->data, b->new_data, b->size);
+	      *b->variable = b->data = b->new_data;
+	    }
+	}
+      if (!bloc->variable)
+	{
+	  bloc->size = 0;
+	  bloc->data = bloc->new_data;
+	}
+      else
+	{
+	  safe_bcopy (bloc->data, bloc->new_data, old_size);
+	  bzero (bloc->new_data + old_size, size - old_size);
+	  *bloc->variable = bloc->data = bloc->new_data;
	 }
-      safe_bcopy (bloc->data, bloc->new_data, old_size);
-      bzero (bloc->new_data + old_size, size - old_size);
-      *bloc->variable = bloc->data = bloc->new_data;
     }
   else
     {
       for (b = bloc; b != NIL_BLOC; b = b->next)
	 {
-	  safe_bcopy (b->data, b->new_data, b->size);
-	  *b->variable = b->data = b->new_data;
+	  if (!b->variable)
+	    {
+	      b->size = 0;
+	      b->data = b->new_data;
+	    }
+	  else
+	    {
+	      safe_bcopy (b->data, b->new_data, b->size);
+	      *b->variable = b->data = b->new_data;
+	    }
	 }
     }
 
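To see what the !b->variable branches above accomplish, here is a small
standalone model of the address accounting shared by relocate_blocs and
resize_bloc (all names here are hypothetical, not part of the patch):
every bloc is assigned a new_data target, but freed blocs contribute no
bytes, so they collapse to zero size at the next compaction.

    #include <stdio.h>

    struct mini_bloc
    {
      int live;                /* 0 stands for variable == NIL (freed) */
      unsigned long size;
      unsigned long new_data;
    };

    int
    main (void)
    {
      /* Three abutting blocs; the middle one was freed while frozen.  */
      struct mini_bloc b[3]
        = { { 1, 4096, 0 }, { 0, 2048, 0 }, { 1, 8192, 0 } };
      unsigned long address = 0x10000;
      int i;

      for (i = 0; i < 3; i++)
        {
          b[i].new_data = address;   /* every bloc gets a target address */
          if (b[i].live)             /* ...but only live blocs take space */
            address += b[i].size;
        }

      /* b[1] and b[2] get the same target, so the freed bloc ends up
         with zero size, as in the !b->variable branch of resize_bloc.  */
      for (i = 0; i < 3; i++)
        printf ("bloc %d -> %#lx (%s)\n", i, b[i].new_data,
                b[i].live ? "live" : "freed");
      return 0;
    }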
@@ -669,6 +714,12 @@ free_bloc (bloc)
 {
   heap_ptr heap = bloc->heap;
 
+  if (r_alloc_freeze_level)
+    {
+      bloc->variable = (POINTER *) NIL;
+      return;
+    }
+
   resize_bloc (bloc, 0);
 
   if (bloc == first_bloc && bloc == last_bloc)
@@ -713,9 +764,6 @@ free_bloc (bloc)
 
 /* Interface routines.  */
 
-static int use_relocatable_buffers;
-static int r_alloc_freeze_level;
-
 /* Obtain SIZE bytes of storage from the free pool, or the system, as
    necessary.  If relocatable blocs are in use, this means relocating
    them.  This function gets plugged into the GNU malloc's __morecore
@@ -768,7 +816,7 @@ r_alloc_sbrk (size)
     {
       get += extra_bytes + page_size;
 
-      if (r_alloc_freeze_level > 0 || ! obtain (address, get))
+      if (! obtain (address, get))
	 return 0;
 
       if (first_heap == last_heap)
@@ -782,9 +830,16 @@ r_alloc_sbrk (size)
 
       if (first_heap->bloc_start < new_bloc_start)
	 {
+	  /* This is no clean solution - no idea how to do it better.  */
+	  if (r_alloc_freeze_level)
+	    return NIL;
+
+	  /* There is a bug here: if the above obtain call succeeded, but the
+	     relocate_blocs call below does not succeed, we need to free
+	     the memory that we got with obtain.  */
+
	   /* Move all blocs upward.  */
-	  if (r_alloc_freeze_level > 0
-	      || ! relocate_blocs (first_bloc, h, new_bloc_start))
+	  if (! relocate_blocs (first_bloc, h, new_bloc_start))
	     return 0;
@@ -800,7 +855,6 @@ r_alloc_sbrk (size)
 
       update_heap_bloc_correspondence (first_bloc, h);
     }
-
   if (h != first_heap)
     {
       /* Give up managing heaps below the one the new
@@ -865,6 +919,10 @@ r_alloc_sbrk (size)
    the data is returned in *PTR.  PTR is thus the address of some variable
    which will use the data area.
 
+   The allocation of 0 bytes is valid.
+   In case r_alloc_freeze is set, a best fit of unused blocs could be done
+   before allocating a new area.  Not yet done.
+
    If we can't allocate the necessary memory, set *PTR to zero, and
    return zero.  */
 
@@ -919,6 +977,10 @@ r_alloc_free (ptr)
    SIZE is less than or equal to the current bloc size, in which case
    do nothing.
 
+   In case r_alloc_freeze is set, a new bloc is allocated, and the
+   memory copied to it.  Not very efficient.  We could traverse the
+   bloc_list for a best fit of free blocs first.
+
    Change *PTR to reflect the new bloc, and return this value.
 
    If more memory cannot be allocated, then leave *PTR unchanged, and
@@ -934,17 +996,51 @@ r_re_alloc (ptr, size)
   if (! r_alloc_initialized)
     r_alloc_init ();
 
+  if (!*ptr)
+    return r_alloc (ptr, size);
+  if (!size)
+    {
+      r_alloc_free (ptr);
+      return r_alloc (ptr, 0);
+    }
+
   bloc = find_bloc (ptr);
   if (bloc == NIL_BLOC)
     abort ();
 
-  if (size <= bloc->size)
-    /* Wouldn't it be useful to actually resize the bloc here?  */
-    return *ptr;
-
-  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
-    return 0;
-
+  if (size < bloc->size)
+    {
+      /* Wouldn't it be useful to actually resize the bloc here?  */
+      /* I think so too, but not if it's too expensive...  */
+      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
+	  && r_alloc_freeze_level == 0)
+	{
+	  resize_bloc (bloc, MEM_ROUNDUP (size));
+	  /* Never mind if this fails, just do nothing...  */
+	  /* It *should* be infallible!  */
+	}
+    }
+  else if (size > bloc->size)
+    {
+      if (r_alloc_freeze_level)
+	{
+	  bloc_ptr new_bloc;
+	  new_bloc = get_bloc (MEM_ROUNDUP (size));
+	  if (new_bloc)
+	    {
+	      new_bloc->variable = ptr;
+	      *ptr = new_bloc->data;
+	      bloc->variable = (POINTER *) NIL;
+	    }
+	  else
+	    return NIL;
+	}
+      else
+	{
+	  if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
+	    return NIL;
+	}
+    }
   return *ptr;
 }
 
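Taken together, the new entry checks and size comparisons give r_re_alloc
realloc-like edge-case behavior.  A hypothetical driver (not part of the
patch) showing the cases it now distinguishes:

    POINTER p = 0;

    r_re_alloc (&p, 100);    /* *ptr == 0: delegates to r_alloc */
    r_re_alloc (&p, 0);      /* size == 0: frees, then returns a 0-byte bloc */
    r_re_alloc (&p, 65536);  /* growing under freeze: a fresh bloc is used and
                                the old one is marked unused (variable = NIL) */
    r_re_alloc (&p, 100);    /* shrinks only if at least page_size bytes would
                                be recovered and the arena is not frozen */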
@@ -974,9 +1070,27 @@ r_alloc_freeze (size)
 void
 r_alloc_thaw ()
 {
+
+  if (! r_alloc_initialized)
+    r_alloc_init ();
+
   if (--r_alloc_freeze_level < 0)
     abort ();
+
+  /* This frees all unused blocs.  It is not too inefficient, as the
+     resize and bcopy is done only once.  Afterwards, all unreferenced
+     blocs are already shrunk to zero size.  */
+  if (!r_alloc_freeze_level)
+    {
+      bloc_ptr *b = &first_bloc;
+      while (*b)
+	if (!(*b)->variable)
+	  free_bloc (*b);
+	else
+	  b = &(*b)->next;
+    }
 }
+
 
 /* The hook `malloc' uses for the function which gets more space
    from the system.  */
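
The thaw-time sweep above uses the classic pointer-to-a-pointer idiom for
deleting from a singly linked list without special-casing the head (in the
patch, free_bloc itself does the unlinking).  A self-contained sketch of
the same idiom, with hypothetical types not taken from ralloc.c:

    #include <stdlib.h>

    struct node
    {
      int unused;              /* plays the role of variable == NIL */
      struct node *next;
    };

    /* Delete every node marked unused.  NP always points at the link
       that leads to the current node (initially at the head pointer
       itself), so removing the head needs no special case.  */
    static void
    sweep (struct node **head)
    {
      struct node **np = head;
      while (*np)
        if ((*np)->unused)
          {
            struct node *dead = *np;
            *np = dead->next;        /* unlink; do not advance NP */
            free (dead);
          }
        else
          np = &(*np)->next;         /* step past a surviving node */
    }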