diff options
| author | Richard M. Stallman | 1994-10-18 21:53:19 +0000 |
|---|---|---|
| committer | Richard M. Stallman | 1994-10-18 21:53:19 +0000 |
| commit | abe9ff327fdcb86d6b1c051a6b6c77a31202edc6 (patch) | |
| tree | 8c7674c8f28e3a0b57939c41d6075aa5437586c7 /src | |
| parent | 149cbcb0b7d9aecbc41ecb497c9921549e217409 (diff) | |
| download | emacs-abe9ff327fdcb86d6b1c051a6b6c77a31202edc6.tar.gz emacs-abe9ff327fdcb86d6b1c051a6b6c77a31202edc6.zip | |
(heap_base): Move static var to top level.
(struct heap): New slot `free'.
(obtain): Set `free' for new heap.
(get_bloc): Update `free'.
(find_heap): New function.
(update_heap_free_pointers): New function.
(resize_bloc, r_alloc_sbrk): Call update_heap_free_pointers.
Diffstat (limited to 'src')
| -rw-r--r-- | src/ralloc.c | 289 |
1 files changed, 208 insertions, 81 deletions
diff --git a/src/ralloc.c b/src/ralloc.c index 0ee166913c4..997faf89a02 100644 --- a/src/ralloc.c +++ b/src/ralloc.c | |||
| @@ -21,7 +21,7 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ | |||
| 21 | 21 | ||
| 22 | Only relocate the blocs necessary for SIZE in r_alloc_sbrk, | 22 | Only relocate the blocs necessary for SIZE in r_alloc_sbrk, |
| 23 | rather than all of them. This means allowing for a possible | 23 | rather than all of them. This means allowing for a possible |
| 24 | hole between the first bloc and the end of malloc storage. */ | 24 | hole between the first bloc and the end of malloc storage. */ |
| 25 | 25 | ||
| 26 | #ifdef emacs | 26 | #ifdef emacs |
| 27 | 27 | ||
| @@ -87,13 +87,14 @@ static void r_alloc_init (); | |||
| 87 | 87 | ||
| 88 | /* Declarations for working with the malloc, ralloc, and system breaks. */ | 88 | /* Declarations for working with the malloc, ralloc, and system breaks. */ |
| 89 | 89 | ||
| 90 | /* Function to set the real break value. */ | 90 | /* Function to set the real break value. */ |
| 91 | static POINTER (*real_morecore) (); | 91 | static POINTER (*real_morecore) (); |
| 92 | 92 | ||
| 93 | /* The break value, as seen by malloc (). */ | 93 | /* The break value, as seen by malloc. */ |
| 94 | static POINTER virtual_break_value; | 94 | static POINTER virtual_break_value; |
| 95 | 95 | ||
| 96 | /* The break value, viewed by the relocatable blocs. */ | 96 | /* The address of the end of the last data in use by ralloc, |
| 97 | including relocatable blocs as well as malloc data. */ | ||
| 97 | static POINTER break_value; | 98 | static POINTER break_value; |
| 98 | 99 | ||
| 99 | /* This is the size of a page. We round memory requests to this boundary. */ | 100 | /* This is the size of a page. We round memory requests to this boundary. */ |
| @@ -104,7 +105,7 @@ static int page_size; | |||
| 104 | static int extra_bytes; | 105 | static int extra_bytes; |
| 105 | 106 | ||
| 106 | /* Macros for rounding. Note that rounding to any value is possible | 107 | /* Macros for rounding. Note that rounding to any value is possible |
| 107 | by changing the definition of PAGE. */ | 108 | by changing the definition of PAGE. */ |
| 108 | #define PAGE (getpagesize ()) | 109 | #define PAGE (getpagesize ()) |
| 109 | #define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0) | 110 | #define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0) |
| 110 | #define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \ | 111 | #define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \ |
| @@ -115,20 +116,44 @@ static int extra_bytes; | |||
| 115 | #define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \ | 116 | #define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \ |
| 116 | & ~(MEM_ALIGN - 1)) | 117 | & ~(MEM_ALIGN - 1)) |
| 117 | 118 | ||
| 118 | /* Data structures of heaps and blocs */ | 119 | /* Data structures of heaps and blocs. */ |
| 120 | |||
| 121 | /* The relocatable objects, or blocs, and the malloc data | ||
| 122 | both reside within one or more heaps. | ||
| 123 | Each heap contains malloc data, running from `start' to `bloc_start', | ||
| 124 | and relocatable objects, running from `bloc_start' to `free'. | ||
| 125 | |||
| 126 | Relocatable objects may relocate within the same heap | ||
| 127 | or may move into another heap; the heaps themselves may grow | ||
| 128 | but they never move. | ||
| 129 | |||
| 130 | We try to make just one heap and make it larger as necessary. | ||
| 131 | But sometimes we can't do that, because we can't get contiguous | ||
| 132 | space to add onto the heap. When that happens, we start a new heap. */ | ||
| 133 | |||
| 119 | typedef struct heap | 134 | typedef struct heap |
| 120 | { | 135 | { |
| 121 | struct heap *next; | 136 | struct heap *next; |
| 122 | struct heap *prev; | 137 | struct heap *prev; |
| 138 | /* Start of memory range of this heap. */ | ||
| 123 | POINTER start; | 139 | POINTER start; |
| 140 | /* End of memory range of this heap. */ | ||
| 124 | POINTER end; | 141 | POINTER end; |
| 125 | POINTER bloc_start; /* start of relocatable blocs */ | 142 | /* Start of relocatable data in this heap. */ |
| 143 | POINTER bloc_start; | ||
| 144 | /* Start of unused space in this heap. */ | ||
| 145 | POINTER free; | ||
| 126 | } *heap_ptr; | 146 | } *heap_ptr; |
| 127 | 147 | ||
| 128 | #define NIL_HEAP ((heap_ptr) 0) | 148 | #define NIL_HEAP ((heap_ptr) 0) |
| 129 | #define HEAP_PTR_SIZE (sizeof (struct heap)) | 149 | #define HEAP_PTR_SIZE (sizeof (struct heap)) |
| 130 | 150 | ||
| 131 | /* Head and tail of the list of heaps. */ | 151 | /* This is the first heap object. |
| 152 | If we need additional heap objects, each one resides at the beginning of | ||
| 153 | the space it covers. */ | ||
| 154 | static struct heap heap_base; | ||
| 155 | |||
| 156 | /* Head and tail of the list of heaps. */ | ||
| 132 | static heap_ptr first_heap, last_heap; | 157 | static heap_ptr first_heap, last_heap; |
| 133 | 158 | ||
| 134 | /* These structures are allocated in the malloc arena. | 159 | /* These structures are allocated in the malloc arena. |
| @@ -148,20 +173,47 @@ typedef struct bp | |||
| 148 | #define NIL_BLOC ((bloc_ptr) 0) | 173 | #define NIL_BLOC ((bloc_ptr) 0) |
| 149 | #define BLOC_PTR_SIZE (sizeof (struct bp)) | 174 | #define BLOC_PTR_SIZE (sizeof (struct bp)) |
| 150 | 175 | ||
| 151 | /* Head and tail of the list of relocatable blocs. */ | 176 | /* Head and tail of the list of relocatable blocs. */ |
| 152 | static bloc_ptr first_bloc, last_bloc; | 177 | static bloc_ptr first_bloc, last_bloc; |
| 153 | 178 | ||
| 154 | 179 | ||
| 155 | /* Functions to get and return memory from the system. */ | 180 | /* Functions to get and return memory from the system. */ |
| 156 | 181 | ||
| 157 | /* Obtain SIZE bytes of space starting at ADDRESS in a heap. | 182 | /* Find the heap that ADDRESS falls within. */ |
| 183 | |||
| 184 | static heap_ptr | ||
| 185 | find_heap (address) | ||
| 186 | POINTER address; | ||
| 187 | { | ||
| 188 | heap_ptr heap; | ||
| 189 | |||
| 190 | for (heap = last_heap; heap; heap = heap->prev) | ||
| 191 | { | ||
| 192 | if (heap->start <= address && address <= heap->end) | ||
| 193 | return heap; | ||
| 194 | } | ||
| 195 | |||
| 196 | return NIL_HEAP; | ||
| 197 | } | ||
| 198 | |||
| 199 | /* Find SIZE bytes of space in a heap. | ||
| 200 | Try to get them at ADDRESS (which must fall within some heap's range) | ||
| 201 | if we can get that many within one heap. | ||
| 202 | |||
| 158 | If enough space is not presently available in our reserve, this means | 203 | If enough space is not presently available in our reserve, this means |
| 159 | getting more page-aligned space from the system. If the returned space | 204 | getting more page-aligned space from the system. If the returned space |
| 160 | is not contiguous to the last heap, allocate a new heap, and append it | 205 | is not contiguous to the last heap, allocate a new heap, and append it |
| 161 | to the heap list. | 206 | |
| 207 | obtain does not try to keep track of whether space is in use | ||
| 208 | or not in use. It just returns the address of SIZE bytes that | ||
| 209 | fall within a single heap. If you call obtain twice in a row | ||
| 210 | with the same arguments, you typically get the same value. | ||
| 211 | It's the caller's responsibility to keep | ||
| 212 | track of what space is in use. | ||
| 162 | 213 | ||
| 163 | Return the address of the space if all went well, or zero if we couldn't | 214 | Return the address of the space if all went well, or zero if we couldn't |
| 164 | allocate the memory. */ | 215 | allocate the memory. */ |
| 216 | |||
| 165 | static POINTER | 217 | static POINTER |
| 166 | obtain (address, size) | 218 | obtain (address, size) |
| 167 | POINTER address; | 219 | POINTER address; |
| @@ -170,6 +222,7 @@ obtain (address, size) | |||
| 170 | heap_ptr heap; | 222 | heap_ptr heap; |
| 171 | SIZE already_available; | 223 | SIZE already_available; |
| 172 | 224 | ||
| 225 | /* Find the heap that ADDRESS falls within. */ | ||
| 173 | for (heap = last_heap; heap; heap = heap->prev) | 226 | for (heap = last_heap; heap; heap = heap->prev) |
| 174 | { | 227 | { |
| 175 | if (heap->start <= address && address <= heap->end) | 228 | if (heap->start <= address && address <= heap->end) |
| @@ -177,8 +230,10 @@ obtain (address, size) | |||
| 177 | } | 230 | } |
| 178 | 231 | ||
| 179 | if (! heap) | 232 | if (! heap) |
| 180 | abort(); | 233 | abort (); |
| 181 | 234 | ||
| 235 | /* If we can't fit SIZE bytes in that heap, | ||
| 236 | try successive later heaps. */ | ||
| 182 | while (heap && address + size > heap->end) | 237 | while (heap && address + size > heap->end) |
| 183 | { | 238 | { |
| 184 | heap = heap->next; | 239 | heap = heap->next; |
| @@ -187,6 +242,8 @@ obtain (address, size) | |||
| 187 | address = heap->bloc_start; | 242 | address = heap->bloc_start; |
| 188 | } | 243 | } |
| 189 | 244 | ||
| 245 | /* If we can't fit them within any existing heap, | ||
| 246 | get more space. */ | ||
| 190 | if (heap == NIL_HEAP) | 247 | if (heap == NIL_HEAP) |
| 191 | { | 248 | { |
| 192 | POINTER new = (*real_morecore)(0); | 249 | POINTER new = (*real_morecore)(0); |
| @@ -196,9 +253,10 @@ obtain (address, size) | |||
| 196 | 253 | ||
| 197 | if (new != last_heap->end) | 254 | if (new != last_heap->end) |
| 198 | { | 255 | { |
| 199 | /* Someone else called sbrk(). */ | 256 | /* Someone else called sbrk. Make a new heap. */ |
| 200 | heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP(new); | 257 | |
| 201 | POINTER bloc_start = (POINTER) MEM_ROUNDUP((POINTER)(new_heap + 1)); | 258 | heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new); |
| 259 | POINTER bloc_start = (POINTER) MEM_ROUNDUP ((POINTER)(new_heap + 1)); | ||
| 202 | 260 | ||
| 203 | if ((*real_morecore) (bloc_start - new) != new) | 261 | if ((*real_morecore) (bloc_start - new) != new) |
| 204 | return 0; | 262 | return 0; |
| @@ -206,6 +264,7 @@ obtain (address, size) | |||
| 206 | new_heap->start = new; | 264 | new_heap->start = new; |
| 207 | new_heap->end = bloc_start; | 265 | new_heap->end = bloc_start; |
| 208 | new_heap->bloc_start = bloc_start; | 266 | new_heap->bloc_start = bloc_start; |
| 267 | new_heap->free = bloc_start; | ||
| 209 | new_heap->next = NIL_HEAP; | 268 | new_heap->next = NIL_HEAP; |
| 210 | new_heap->prev = last_heap; | 269 | new_heap->prev = last_heap; |
| 211 | last_heap->next = new_heap; | 270 | last_heap->next = new_heap; |
| @@ -215,9 +274,11 @@ obtain (address, size) | |||
| 215 | already_available = 0; | 274 | already_available = 0; |
| 216 | } | 275 | } |
| 217 | 276 | ||
| 218 | /* Get some extra, so we can come here less often. */ | 277 | /* Add space to the last heap (which we may have just created). |
| 278 | Get some extra, so we can come here less often. */ | ||
| 279 | |||
| 219 | get = size + extra_bytes - already_available; | 280 | get = size + extra_bytes - already_available; |
| 220 | get = (char *) ROUNDUP((char *)last_heap->end + get) | 281 | get = (char *) ROUNDUP ((char *)last_heap->end + get) |
| 221 | - (char *) last_heap->end; | 282 | - (char *) last_heap->end; |
| 222 | 283 | ||
| 223 | if ((*real_morecore) (get) != last_heap->end) | 284 | if ((*real_morecore) (get) != last_heap->end) |
| @@ -229,13 +290,20 @@ obtain (address, size) | |||
| 229 | return address; | 290 | return address; |
| 230 | } | 291 | } |
| 231 | 292 | ||
| 232 | /* If the last heap has a excessive space, return it to the system. */ | 293 | /* Return unused heap space to the system |
| 294 | if there is a lot of unused space now. | ||
| 295 | This can make the last heap smaller; | ||
| 296 | it can also eliminate the last heap entirely. */ | ||
| 297 | |||
| 233 | static void | 298 | static void |
| 234 | relinquish () | 299 | relinquish () |
| 235 | { | 300 | { |
| 236 | register heap_ptr h; | 301 | register heap_ptr h; |
| 237 | int excess = 0; | 302 | int excess = 0; |
| 238 | 303 | ||
| 304 | /* Add the amount of space beyond break_value | ||
| 305 | in all heaps which extend beyond break_value at all. | ||
| 306 | |||
| 239 | for (h = last_heap; h && break_value < h->end; h = h->prev) | 307 | for (h = last_heap; h && break_value < h->end; h = h->prev) |
| 240 | { | 308 | { |
| 241 | excess += (char *) h->end - (char *) ((break_value < h->bloc_start) | 309 | excess += (char *) h->end - (char *) ((break_value < h->bloc_start) |
| @@ -250,7 +318,7 @@ relinquish () | |||
| 250 | 318 | ||
| 251 | if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess) | 319 | if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess) |
| 252 | { | 320 | { |
| 253 | /* Return the last heap with its header to the system */ | 321 | /* Return the last heap, with its header, to the system. */ |
| 254 | excess = (char *)last_heap->end - (char *)last_heap->start; | 322 | excess = (char *)last_heap->end - (char *)last_heap->start; |
| 255 | last_heap = last_heap->prev; | 323 | last_heap = last_heap->prev; |
| 256 | last_heap->next = NIL_HEAP; | 324 | last_heap->next = NIL_HEAP; |
| @@ -258,7 +326,7 @@ relinquish () | |||
| 258 | else | 326 | else |
| 259 | { | 327 | { |
| 260 | excess = (char *) last_heap->end | 328 | excess = (char *) last_heap->end |
| 261 | - (char *) ROUNDUP((char *)last_heap->end - excess); | 329 | - (char *) ROUNDUP ((char *)last_heap->end - excess); |
| 262 | last_heap->end -= excess; | 330 | last_heap->end -= excess; |
| 263 | } | 331 | } |
| 264 | 332 | ||
| @@ -270,7 +338,7 @@ relinquish () | |||
| 270 | /* The meat - allocating, freeing, and relocating blocs. */ | 338 | /* The meat - allocating, freeing, and relocating blocs. */ |
| 271 | 339 | ||
| 272 | /* Find the bloc referenced by the address in PTR. Returns a pointer | 340 | /* Find the bloc referenced by the address in PTR. Returns a pointer |
| 273 | to that block. */ | 341 | to that block. */ |
| 274 | 342 | ||
| 275 | static bloc_ptr | 343 | static bloc_ptr |
| 276 | find_bloc (ptr) | 344 | find_bloc (ptr) |
| @@ -298,6 +366,7 @@ get_bloc (size) | |||
| 298 | SIZE size; | 366 | SIZE size; |
| 299 | { | 367 | { |
| 300 | register bloc_ptr new_bloc; | 368 | register bloc_ptr new_bloc; |
| 369 | register heap_ptr heap; | ||
| 301 | 370 | ||
| 302 | if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE)) | 371 | if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE)) |
| 303 | || ! (new_bloc->data = obtain (break_value, size))) | 372 | || ! (new_bloc->data = obtain (break_value, size))) |
| @@ -315,6 +384,11 @@ get_bloc (size) | |||
| 315 | new_bloc->variable = (POINTER *) NIL; | 384 | new_bloc->variable = (POINTER *) NIL; |
| 316 | new_bloc->new_data = 0; | 385 | new_bloc->new_data = 0; |
| 317 | 386 | ||
| 387 | /* Record in the heap that this space is in use. */ | ||
| 388 | heap = find_heap (new_bloc->data); | ||
| 389 | heap->free = break_value; | ||
| 390 | |||
| 391 | /* Put this bloc on the doubly-linked list of blocs. */ | ||
| 318 | if (first_bloc) | 392 | if (first_bloc) |
| 319 | { | 393 | { |
| 320 | new_bloc->prev = last_bloc; | 394 | new_bloc->prev = last_bloc; |
| @@ -330,12 +404,13 @@ get_bloc (size) | |||
| 330 | return new_bloc; | 404 | return new_bloc; |
| 331 | } | 405 | } |
| 332 | 406 | ||
| 333 | /* Calculate new locations of blocs in the list begining with BLOC, | 407 | /* Calculate new locations of blocs in the list beginning with BLOC, |
| 334 | whose spaces is started at ADDRESS in HEAP. If enough space is | 408 | relocating it to start at ADDRESS, in heap HEAP. If enough space is |
| 335 | not presently available in our reserve, obtain() is called for | 409 | not presently available in our reserve, call obtain for |
| 336 | more space. | 410 | more space. |
| 337 | 411 | ||
| 338 | Do not touch the contents of blocs or break_value. */ | 412 | Store the new location of each bloc in its new_data field. |
| 413 | Do not touch the contents of blocs or break_value. */ | ||
| 339 | 414 | ||
| 340 | static int | 415 | static int |
| 341 | relocate_blocs (bloc, heap, address) | 416 | relocate_blocs (bloc, heap, address) |
| @@ -347,6 +422,8 @@ relocate_blocs (bloc, heap, address) | |||
| 347 | 422 | ||
| 348 | while (b) | 423 | while (b) |
| 349 | { | 424 | { |
| 425 | /* If bloc B won't fit within HEAP, | ||
| 426 | move to the next heap and try again. */ | ||
| 350 | while (heap && address + b->size > heap->end) | 427 | while (heap && address + b->size > heap->end) |
| 351 | { | 428 | { |
| 352 | heap = heap->next; | 429 | heap = heap->next; |
| @@ -355,23 +432,30 @@ relocate_blocs (bloc, heap, address) | |||
| 355 | address = heap->bloc_start; | 432 | address = heap->bloc_start; |
| 356 | } | 433 | } |
| 357 | 434 | ||
| 435 | /* If BLOC won't fit in any heap, | ||
| 436 | get enough new space to hold BLOC and all following blocs. */ | ||
| 358 | if (heap == NIL_HEAP) | 437 | if (heap == NIL_HEAP) |
| 359 | { | 438 | { |
| 360 | register bloc_ptr tb = b; | 439 | register bloc_ptr tb = b; |
| 361 | register SIZE s = 0; | 440 | register SIZE s = 0; |
| 362 | 441 | ||
| 442 | /* Add up the size of all the following blocs. */ | ||
| 363 | while (tb != NIL_BLOC) | 443 | while (tb != NIL_BLOC) |
| 364 | { | 444 | { |
| 365 | s += tb->size; | 445 | s += tb->size; |
| 366 | tb = tb->next; | 446 | tb = tb->next; |
| 367 | } | 447 | } |
| 368 | 448 | ||
| 369 | if (! (address = obtain(address, s))) | 449 | /* Get that space. */ |
| 450 | address = obtain (address, s); | ||
| 451 | if (address == 0) | ||
| 370 | return 0; | 452 | return 0; |
| 371 | 453 | ||
| 372 | heap = last_heap; | 454 | heap = last_heap; |
| 373 | } | 455 | } |
| 374 | 456 | ||
| 457 | /* Record the new address of this bloc | ||
| 458 | and update where the next bloc can start. */ | ||
| 375 | b->new_data = address; | 459 | b->new_data = address; |
| 376 | address += b->size; | 460 | address += b->size; |
| 377 | b = b->next; | 461 | b = b->next; |
| @@ -380,7 +464,45 @@ relocate_blocs (bloc, heap, address) | |||
| 380 | return 1; | 464 | return 1; |
| 381 | } | 465 | } |
| 382 | 466 | ||
| 383 | /* Resize BLOC to SIZE bytes. */ | 467 | /* Update the free pointers of all heaps starting with HEAP |
| 468 | based on the blocs starting with BLOC. BLOC should be in | ||
| 469 | heap HEAP. */ | ||
| 470 | |||
| 471 | static | ||
| 472 | update_heap_free_pointers (bloc, heap) | ||
| 473 | bloc_ptr bloc; | ||
| 474 | heap_ptr heap; | ||
| 475 | { | ||
| 476 | register bloc_ptr b; | ||
| 477 | |||
| 478 | /* Advance through blocs one by one. */ | ||
| 479 | for (b = bloc; b != NIL_BLOC; b = b->next) | ||
| 480 | { | ||
| 481 | /* Advance through heaps in sync with the blocs that are in them. */ | ||
| 482 | while (heap) | ||
| 483 | { | ||
| 484 | if (heap->bloc_start <= b->data && b->data <= heap->end) | ||
| 485 | break; | ||
| 486 | heap = heap->next; | ||
| 487 | heap->free = heap->bloc_start; | ||
| 488 | } | ||
| 489 | /* In each heap, record the end of the last bloc in it. */ | ||
| 490 | heap->free = b->data + b->size; | ||
| 491 | } | ||
| 492 | |||
| 493 | /* If there are any remaining heaps and no blocs left, | ||
| 494 | update the `free' slot assuming they contain no blocs. */ | ||
| 495 | heap = heap->next; | ||
| 496 | while (heap) | ||
| 497 | { | ||
| 498 | heap->free = heap->bloc_start; | ||
| 499 | heap = heap->next; | ||
| 500 | } | ||
| 501 | } | ||
| 502 | |||
| 503 | /* Resize BLOC to SIZE bytes. This relocates the blocs | ||
| 504 | that come after BLOC in memory. */ | ||
| 505 | |||
| 384 | static int | 506 | static int |
| 385 | resize_bloc (bloc, size) | 507 | resize_bloc (bloc, size) |
| 386 | bloc_ptr bloc; | 508 | bloc_ptr bloc; |
| @@ -401,14 +523,14 @@ resize_bloc (bloc, size) | |||
| 401 | } | 523 | } |
| 402 | 524 | ||
| 403 | if (heap == NIL_HEAP) | 525 | if (heap == NIL_HEAP) |
| 404 | abort(); | 526 | abort (); |
| 405 | 527 | ||
| 406 | old_size = bloc->size; | 528 | old_size = bloc->size; |
| 407 | bloc->size = size; | 529 | bloc->size = size; |
| 408 | 530 | ||
| 409 | /* Note that bloc could be moved into the previous heap. */ | 531 | /* Note that bloc could be moved into the previous heap. */ |
| 410 | address = bloc->prev ? bloc->prev->data + bloc->prev->size | 532 | address = (bloc->prev ? bloc->prev->data + bloc->prev->size |
| 411 | : first_heap->bloc_start; | 533 | : first_heap->bloc_start); |
| 412 | while (heap) | 534 | while (heap) |
| 413 | { | 535 | { |
| 414 | if (heap->bloc_start <= address && address <= heap->end) | 536 | if (heap->bloc_start <= address && address <= heap->end) |
| @@ -442,13 +564,15 @@ resize_bloc (bloc, size) | |||
| 442 | } | 564 | } |
| 443 | } | 565 | } |
| 444 | 566 | ||
| 445 | break_value = last_bloc ? last_bloc->data + last_bloc->size | 567 | update_heap_free_pointers (bloc, heap); |
| 446 | : first_heap->bloc_start; | 568 | |
| 569 | break_value = (last_bloc ? last_bloc->data + last_bloc->size | ||
| 570 | : first_heap->bloc_start); | ||
| 447 | return 1; | 571 | return 1; |
| 448 | } | 572 | } |
| 449 | 573 | ||
| 450 | /* Free BLOC from the chain of blocs, relocating any blocs above it | 574 | /* Free BLOC from the chain of blocs, relocating any blocs above it. |
| 451 | and returning BLOC->size bytes to the free area. */ | 575 | This may return space to the system. */ |
| 452 | 576 | ||
| 453 | static void | 577 | static void |
| 454 | free_bloc (bloc) | 578 | free_bloc (bloc) |
| @@ -511,51 +635,51 @@ r_alloc_sbrk (size) | |||
| 511 | 635 | ||
| 512 | if (size > 0) | 636 | if (size > 0) |
| 513 | { | 637 | { |
| 514 | /* Allocate a page-aligned space. GNU malloc would reclaim an | 638 | /* Allocate a page-aligned space. GNU malloc would reclaim an |
| 515 | extra space if we passed an unaligned one. But we could | 639 | extra space if we passed an unaligned one. But we could |
| 516 | not always find a space which is contiguous to the previous. */ | 640 | not always find a space which is contiguous to the previous. */ |
| 517 | POINTER new_bloc_start; | 641 | POINTER new_bloc_start; |
| 518 | heap_ptr h = first_heap; | 642 | heap_ptr h = first_heap; |
| 519 | SIZE get = ROUNDUP(size); | 643 | SIZE get = ROUNDUP (size); |
| 520 | 644 | ||
| 521 | address = (POINTER) ROUNDUP(virtual_break_value); | 645 | address = (POINTER) ROUNDUP (virtual_break_value); |
| 522 | 646 | ||
| 523 | /* Search the list upward for a heap which is large enough. */ | 647 | /* Search the list upward for a heap which is large enough. */ |
| 524 | while ((char *) h->end < (char *) MEM_ROUNDUP((char *)address + get)) | 648 | while ((char *) h->end < (char *) MEM_ROUNDUP ((char *)address + get)) |
| 525 | { | 649 | { |
| 526 | h = h->next; | 650 | h = h->next; |
| 527 | if (h == NIL_HEAP) | 651 | if (h == NIL_HEAP) |
| 528 | break; | 652 | break; |
| 529 | address = (POINTER) ROUNDUP(h->start); | 653 | address = (POINTER) ROUNDUP (h->start); |
| 530 | } | 654 | } |
| 531 | 655 | ||
| 532 | /* If not found, obatin more space. */ | 656 | /* If not found, obtain more space. */ |
| 533 | if (h == NIL_HEAP) | 657 | if (h == NIL_HEAP) |
| 534 | { | 658 | { |
| 535 | get += extra_bytes + page_size; | 659 | get += extra_bytes + page_size; |
| 536 | 660 | ||
| 537 | if (r_alloc_freeze_level > 0 || ! obtain(address, get)) | 661 | if (r_alloc_freeze_level > 0 || ! obtain (address, get)) |
| 538 | return 0; | 662 | return 0; |
| 539 | 663 | ||
| 540 | if (first_heap == last_heap) | 664 | if (first_heap == last_heap) |
| 541 | address = (POINTER) ROUNDUP(virtual_break_value); | 665 | address = (POINTER) ROUNDUP (virtual_break_value); |
| 542 | else | 666 | else |
| 543 | address = (POINTER) ROUNDUP(last_heap->start); | 667 | address = (POINTER) ROUNDUP (last_heap->start); |
| 544 | h = last_heap; | 668 | h = last_heap; |
| 545 | } | 669 | } |
| 546 | 670 | ||
| 547 | new_bloc_start = (POINTER) MEM_ROUNDUP((char *)address + get); | 671 | new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get); |
| 548 | 672 | ||
| 549 | if (first_heap->bloc_start < new_bloc_start) | 673 | if (first_heap->bloc_start < new_bloc_start) |
| 550 | { | 674 | { |
| 551 | /* Move all blocs upward. */ | 675 | /* Move all blocs upward. */ |
| 552 | if (r_alloc_freeze_level > 0 | 676 | if (r_alloc_freeze_level > 0 |
| 553 | || ! relocate_blocs (first_bloc, h, new_bloc_start)) | 677 | || ! relocate_blocs (first_bloc, h, new_bloc_start)) |
| 554 | return 0; | 678 | return 0; |
| 555 | 679 | ||
| 556 | /* Note that (POINTER)(h+1) <= new_bloc_start since | 680 | /* Note that (POINTER)(h+1) <= new_bloc_start since |
| 557 | get >= page_size, so the following does not destroy the heap | 681 | get >= page_size, so the following does not destroy the heap |
| 558 | header. */ | 682 | header. */ |
| 559 | for (b = last_bloc; b != NIL_BLOC; b = b->prev) | 683 | for (b = last_bloc; b != NIL_BLOC; b = b->prev) |
| 560 | { | 684 | { |
| 561 | safe_bcopy (b->data, b->new_data, b->size); | 685 | safe_bcopy (b->data, b->new_data, b->size); |
| @@ -563,16 +687,19 @@ r_alloc_sbrk (size) | |||
| 563 | } | 687 | } |
| 564 | 688 | ||
| 565 | h->bloc_start = new_bloc_start; | 689 | h->bloc_start = new_bloc_start; |
| 690 | |||
| 691 | update_heap_free_pointers (first_bloc, h); | ||
| 566 | } | 692 | } |
| 567 | 693 | ||
| 568 | if (h != first_heap) | 694 | if (h != first_heap) |
| 569 | { | 695 | { |
| 570 | /* Give up managing heaps below the one the new | 696 | /* Give up managing heaps below the one the new |
| 571 | virtual_break_value points to. */ | 697 | virtual_break_value points to. */ |
| 572 | first_heap->prev = NIL_HEAP; | 698 | first_heap->prev = NIL_HEAP; |
| 573 | first_heap->next = h->next; | 699 | first_heap->next = h->next; |
| 574 | first_heap->start = h->start; | 700 | first_heap->start = h->start; |
| 575 | first_heap->end = h->end; | 701 | first_heap->end = h->end; |
| 702 | first_heap->free = h->free; | ||
| 576 | first_heap->bloc_start = h->bloc_start; | 703 | first_heap->bloc_start = h->bloc_start; |
| 577 | 704 | ||
| 578 | if (first_heap->next) | 705 | if (first_heap->next) |
| @@ -594,9 +721,9 @@ r_alloc_sbrk (size) | |||
| 594 | { | 721 | { |
| 595 | excess -= extra_bytes; | 722 | excess -= extra_bytes; |
| 596 | first_heap->bloc_start | 723 | first_heap->bloc_start |
| 597 | = (POINTER) MEM_ROUNDUP((char *)first_heap->bloc_start - excess); | 724 | = (POINTER) MEM_ROUNDUP ((char *)first_heap->bloc_start - excess); |
| 598 | 725 | ||
| 599 | relocate_blocs(first_bloc, first_heap, first_heap->bloc_start); | 726 | relocate_blocs (first_bloc, first_heap, first_heap->bloc_start); |
| 600 | 727 | ||
| 601 | for (b = first_bloc; b != NIL_BLOC; b = b->next) | 728 | for (b = first_bloc; b != NIL_BLOC; b = b->next) |
| 602 | { | 729 | { |
| @@ -616,7 +743,7 @@ r_alloc_sbrk (size) | |||
| 616 | break_value = last_bloc ? last_bloc->data + last_bloc->size | 743 | break_value = last_bloc ? last_bloc->data + last_bloc->size |
| 617 | : first_heap->bloc_start; | 744 | : first_heap->bloc_start; |
| 618 | if (size < 0) | 745 | if (size < 0) |
| 619 | relinquish(); | 746 | relinquish (); |
| 620 | 747 | ||
| 621 | return address; | 748 | return address; |
| 622 | } | 749 | } |
| @@ -638,7 +765,7 @@ r_alloc (ptr, size) | |||
| 638 | if (! r_alloc_initialized) | 765 | if (! r_alloc_initialized) |
| 639 | r_alloc_init (); | 766 | r_alloc_init (); |
| 640 | 767 | ||
| 641 | new_bloc = get_bloc (MEM_ROUNDUP(size)); | 768 | new_bloc = get_bloc (MEM_ROUNDUP (size)); |
| 642 | if (new_bloc) | 769 | if (new_bloc) |
| 643 | { | 770 | { |
| 644 | new_bloc->variable = ptr; | 771 | new_bloc->variable = ptr; |
| @@ -692,7 +819,7 @@ r_re_alloc (ptr, size) | |||
| 692 | /* Wouldn't it be useful to actually resize the bloc here? */ | 819 | /* Wouldn't it be useful to actually resize the bloc here? */ |
| 693 | return *ptr; | 820 | return *ptr; |
| 694 | 821 | ||
| 695 | if (! resize_bloc (bloc, MEM_ROUNDUP(size))) | 822 | if (! resize_bloc (bloc, MEM_ROUNDUP (size))) |
| 696 | return 0; | 823 | return 0; |
| 697 | 824 | ||
| 698 | return *ptr; | 825 | return *ptr; |
| @@ -702,6 +829,7 @@ r_re_alloc (ptr, size) | |||
| 702 | of non-relocatable heap if possible. The relocatable blocs are | 829 | of non-relocatable heap if possible. The relocatable blocs are |
| 703 | guaranteed to hold still until thawed, even if this means that | 830 | guaranteed to hold still until thawed, even if this means that |
| 704 | malloc must return a null pointer. */ | 831 | malloc must return a null pointer. */ |
| 832 | |||
| 705 | void | 833 | void |
| 706 | r_alloc_freeze (size) | 834 | r_alloc_freeze (size) |
| 707 | long size; | 835 | long size; |
| @@ -728,12 +856,11 @@ r_alloc_thaw () | |||
| 728 | from the system. */ | 856 | from the system. */ |
| 729 | extern POINTER (*__morecore) (); | 857 | extern POINTER (*__morecore) (); |
| 730 | 858 | ||
| 731 | /* Initialize various things for memory allocation. */ | 859 | /* Initialize various things for memory allocation. */ |
| 732 | 860 | ||
| 733 | static void | 861 | static void |
| 734 | r_alloc_init () | 862 | r_alloc_init () |
| 735 | { | 863 | { |
| 736 | static struct heap heap_base; | ||
| 737 | POINTER end; | 864 | POINTER end; |
| 738 | 865 | ||
| 739 | if (r_alloc_initialized) | 866 | if (r_alloc_initialized) |
| @@ -760,7 +887,7 @@ r_alloc_init () | |||
| 760 | not really the page size of the system running the binary in | 887 | not really the page size of the system running the binary in |
| 761 | which page_size is stored. This allows a binary to be built on a | 888 | which page_size is stored. This allows a binary to be built on a |
| 762 | system with one page size and run on a system with a smaller page | 889 | system with one page size and run on a system with a smaller page |
| 763 | size. */ | 890 | size. */ |
| 764 | (*real_morecore) (first_heap->end - first_heap->start); | 891 | (*real_morecore) (first_heap->end - first_heap->start); |
| 765 | 892 | ||
| 766 | /* Clear the rest of the last page; this memory is in our address space | 893 | /* Clear the rest of the last page; this memory is in our address space |
| @@ -784,19 +911,19 @@ r_alloc_check () | |||
| 784 | if (!r_alloc_initialized) | 911 | if (!r_alloc_initialized) |
| 785 | return; | 912 | return; |
| 786 | 913 | ||
| 787 | assert(first_heap); | 914 | assert (first_heap); |
| 788 | assert(last_heap->end <= (POINTER) sbrk(0)); | 915 | assert (last_heap->end <= (POINTER) sbrk (0)); |
| 789 | assert((POINTER) first_heap < first_heap->start); | 916 | assert ((POINTER) first_heap < first_heap->start); |
| 790 | assert(first_heap->start <= virtual_break_value); | 917 | assert (first_heap->start <= virtual_break_value); |
| 791 | assert(virtual_break_value <= first_heap->end); | 918 | assert (virtual_break_value <= first_heap->end); |
| 792 | 919 | ||
| 793 | for (h = first_heap; h; h = h->next) | 920 | for (h = first_heap; h; h = h->next) |
| 794 | { | 921 | { |
| 795 | assert(h->prev == ph); | 922 | assert (h->prev == ph); |
| 796 | assert((POINTER) ROUNDUP(h->end) == h->end); | 923 | assert ((POINTER) ROUNDUP (h->end) == h->end); |
| 797 | assert((POINTER) MEM_ROUNDUP(h->start) == h->start); | 924 | assert ((POINTER) MEM_ROUNDUP (h->start) == h->start); |
| 798 | assert((POINTER) MEM_ROUNDUP(h->bloc_start) == h->bloc_start); | 925 | assert ((POINTER) MEM_ROUNDUP (h->bloc_start) == h->bloc_start); |
| 799 | assert(h->start <= h->bloc_start && h->bloc_start <= h->end); | 926 | assert (h->start <= h->bloc_start && h->bloc_start <= h->end); |
| 800 | 927 | ||
| 801 | if (ph) | 928 | if (ph) |
| 802 | { | 929 | { |
| @@ -810,14 +937,14 @@ r_alloc_check () | |||
| 810 | ph = h; | 937 | ph = h; |
| 811 | } | 938 | } |
| 812 | 939 | ||
| 813 | assert(found); | 940 | assert (found); |
| 814 | assert(last_heap == ph); | 941 | assert (last_heap == ph); |
| 815 | 942 | ||
| 816 | for (b = first_bloc; b; b = b->next) | 943 | for (b = first_bloc; b; b = b->next) |
| 817 | { | 944 | { |
| 818 | assert(b->prev == pb); | 945 | assert (b->prev == pb); |
| 819 | assert((POINTER) MEM_ROUNDUP(b->data) == b->data); | 946 | assert ((POINTER) MEM_ROUNDUP (b->data) == b->data); |
| 820 | assert((SIZE) MEM_ROUNDUP(b->size) == b->size); | 947 | assert ((SIZE) MEM_ROUNDUP (b->size) == b->size); |
| 821 | 948 | ||
| 822 | ph = 0; | 949 | ph = 0; |
| 823 | for (h = first_heap; h; h = h->next) | 950 | for (h = first_heap; h; h = h->next) |
| @@ -827,22 +954,22 @@ r_alloc_check () | |||
| 827 | ph = h; | 954 | ph = h; |
| 828 | } | 955 | } |
| 829 | 956 | ||
| 830 | assert(h); | 957 | assert (h); |
| 831 | 958 | ||
| 832 | if (pb && pb->data + pb->size != b->data) | 959 | if (pb && pb->data + pb->size != b->data) |
| 833 | { | 960 | { |
| 834 | assert(ph && b->data == h->bloc_start); | 961 | assert (ph && b->data == h->bloc_start); |
| 835 | while (ph) | 962 | while (ph) |
| 836 | { | 963 | { |
| 837 | if (ph->bloc_start <= pb->data | 964 | if (ph->bloc_start <= pb->data |
| 838 | && pb->data + pb->size <= ph->end) | 965 | && pb->data + pb->size <= ph->end) |
| 839 | { | 966 | { |
| 840 | assert(pb->data + pb->size + b->size > ph->end); | 967 | assert (pb->data + pb->size + b->size > ph->end); |
| 841 | break; | 968 | break; |
| 842 | } | 969 | } |
| 843 | else | 970 | else |
| 844 | { | 971 | { |
| 845 | assert(ph->bloc_start + b->size > ph->end); | 972 | assert (ph->bloc_start + b->size > ph->end); |
| 846 | } | 973 | } |
| 847 | ph = ph->prev; | 974 | ph = ph->prev; |
| 848 | } | 975 | } |
| @@ -850,11 +977,11 @@ r_alloc_check () | |||
| 850 | pb = b; | 977 | pb = b; |
| 851 | } | 978 | } |
| 852 | 979 | ||
| 853 | assert(last_bloc == pb); | 980 | assert (last_bloc == pb); |
| 854 | 981 | ||
| 855 | if (last_bloc) | 982 | if (last_bloc) |
| 856 | assert(last_bloc->data + last_bloc->size == break_value); | 983 | assert (last_bloc->data + last_bloc->size == break_value); |
| 857 | else | 984 | else |
| 858 | assert(first_heap->bloc_start == break_value); | 985 | assert (first_heap->bloc_start == break_value); |
| 859 | } | 986 | } |
| 860 | #endif /* DEBUG */ | 987 | #endif /* DEBUG */ |