| author | Karl Heuer | 1994-10-12 00:48:03 +0000 |
|---|---|---|
| committer | Karl Heuer | 1994-10-12 00:48:03 +0000 |
| commit | e429caa2159278b5aba3d168fd8858d1c665cc3e (patch) | |
| tree | 0018c2700da3094cf756bd8982d96610c16c97f9 /src/ralloc.c | |
| parent | 424b6d2bf8fe0257327e5a69b06ff18c8e0146b9 (diff) | |
Install Hiroshi Nakano's rewrite to allow multiple heaps, for implementations
where the C library makes calls to sbrk directly.
Diffstat (limited to 'src/ralloc.c')
| -rw-r--r-- | src/ralloc.c | 591 |
1 file changed, 449 insertions, 142 deletions
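For context on the change itself: once anything else in the process calls sbrk between ralloc's own calls, the next piece of core ralloc obtains is no longer adjacent to its previous heap, so a single page_break_value can no longer describe the reserve. A stand-alone illustration of that failure mode (my own sketch, not patch code; assumes a Unix host where sbrk is available, error checking omitted):

```c
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  char *first = sbrk (4096);    /* a region obtained for relocatable data */
  (void) sbrk (4096);           /* the C library itself moving the break */
  char *second = sbrk (4096);   /* next request: not contiguous with FIRST */

  if (second != first + 4096)
    printf ("gap of %ld bytes; a second heap record is needed\n",
            (long) (second - (first + 4096)));
  return 0;
}
```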
diff --git a/src/ralloc.c b/src/ralloc.c
index 9712c26eb36..0ee166913c4 100644
--- a/src/ralloc.c
+++ b/src/ralloc.c
| @@ -96,9 +96,6 @@ static POINTER virtual_break_value; | |||
| 96 | /* The break value, viewed by the relocatable blocs. */ | 96 | /* The break value, viewed by the relocatable blocs. */ |
| 97 | static POINTER break_value; | 97 | static POINTER break_value; |
| 98 | 98 | ||
| 99 | /* The REAL (i.e., page aligned) break value of the process. */ | ||
| 100 | static POINTER page_break_value; | ||
| 101 | |||
| 102 | /* This is the size of a page. We round memory requests to this boundary. */ | 99 | /* This is the size of a page. We round memory requests to this boundary. */ |
| 103 | static int page_size; | 100 | static int page_size; |
| 104 | 101 | ||
| @@ -113,102 +110,165 @@ static int extra_bytes; | |||
| 113 | #define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \ | 110 | #define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \ |
| 114 | & ~(page_size - 1)) | 111 | & ~(page_size - 1)) |
| 115 | #define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1))) | 112 | #define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1))) |
| 113 | |||
| 114 | #define MEM_ALIGN sizeof(double) | ||
| 115 | #define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \ | ||
| 116 | & ~(MEM_ALIGN - 1)) | ||
| 117 | |||
| 118 | /* Data structures of heaps and blocs */ | ||
| 119 | typedef struct heap | ||
| 120 | { | ||
| 121 | struct heap *next; | ||
| 122 | struct heap *prev; | ||
| 123 | POINTER start; | ||
| 124 | POINTER end; | ||
| 125 | POINTER bloc_start; /* start of relocatable blocs */ | ||
| 126 | } *heap_ptr; | ||
| 127 | |||
| 128 | #define NIL_HEAP ((heap_ptr) 0) | ||
| 129 | #define HEAP_PTR_SIZE (sizeof (struct heap)) | ||
| 130 | |||
| 131 | /* Head and tail of the list of heaps. */ | ||
| 132 | static heap_ptr first_heap, last_heap; | ||
| 133 | |||
| 134 | /* These structures are allocated in the malloc arena. | ||
| 135 | The linked list is kept in order of increasing '.data' members. | ||
| 136 | The data blocks abut each other; if b->next is non-nil, then | ||
| 137 | b->data + b->size == b->next->data. */ | ||
| 138 | typedef struct bp | ||
| 139 | { | ||
| 140 | struct bp *next; | ||
| 141 | struct bp *prev; | ||
| 142 | POINTER *variable; | ||
| 143 | POINTER data; | ||
| 144 | SIZE size; | ||
| 145 | POINTER new_data; /* temporarily used for relocation */ | ||
| 146 | } *bloc_ptr; | ||
| 147 | |||
| 148 | #define NIL_BLOC ((bloc_ptr) 0) | ||
| 149 | #define BLOC_PTR_SIZE (sizeof (struct bp)) | ||
| 150 | |||
| 151 | /* Head and tail of the list of relocatable blocs. */ | ||
| 152 | static bloc_ptr first_bloc, last_bloc; | ||
| 153 | |||
| 116 | 154 | ||
| 117 | /* Functions to get and return memory from the system. */ | 155 | /* Functions to get and return memory from the system. */ |
| 118 | 156 | ||
| 119 | /* Obtain SIZE bytes of space. If enough space is not presently available | 157 | /* Obtain SIZE bytes of space starting at ADDRESS in a heap. |
| 120 | in our process reserve, (i.e., (page_break_value - break_value)), | 158 | If enough space is not presently available in our reserve, this means |
| 121 | this means getting more page-aligned space from the system. | 159 | getting more page-aligned space from the system. If the returned space |
| 160 | is not contiguous to the last heap, allocate a new heap, and append it | ||
| 161 | to the heap list. | ||
| 122 | 162 | ||
| 123 | Return non-zero if all went well, or zero if we couldn't allocate | 163 | Return the address of the space if all went well, or zero if we couldn't |
| 124 | the memory. */ | 164 | allocate the memory. */ |
| 125 | static int | 165 | static POINTER |
| 126 | obtain (size) | 166 | obtain (address, size) |
| 127 | SIZE size; | 167 | POINTER address; |
| 168 | SIZE size; | ||
| 128 | { | 169 | { |
| 129 | SIZE already_available = page_break_value - break_value; | 170 | heap_ptr heap; |
| 171 | SIZE already_available; | ||
| 130 | 172 | ||
| 131 | if (already_available < size) | 173 | for (heap = last_heap; heap; heap = heap->prev) |
| 132 | { | 174 | { |
| 133 | SIZE get = ROUNDUP (size - already_available); | 175 | if (heap->start <= address && address <= heap->end) |
| 134 | /* Get some extra, so we can come here less often. */ | 176 | break; |
| 135 | get += extra_bytes; | 177 | } |
| 136 | 178 | ||
| 137 | if ((*real_morecore) (get) == 0) | 179 | if (! heap) |
| 138 | return 0; | 180 | abort(); |
| 139 | 181 | ||
| 140 | page_break_value += get; | 182 | while (heap && address + size > heap->end) |
| 183 | { | ||
| 184 | heap = heap->next; | ||
| 185 | if (heap == NIL_HEAP) | ||
| 186 | break; | ||
| 187 | address = heap->bloc_start; | ||
| 141 | } | 188 | } |
| 142 | 189 | ||
| 143 | break_value += size; | 190 | if (heap == NIL_HEAP) |
| 191 | { | ||
| 192 | POINTER new = (*real_morecore)(0); | ||
| 193 | SIZE get; | ||
| 144 | 194 | ||
| 145 | return 1; | 195 | already_available = (char *)last_heap->end - (char *)address; |
| 146 | } | ||
| 147 | 196 | ||
| 148 | /* Obtain SIZE bytes of space and return a pointer to the new area. | 197 | if (new != last_heap->end) |
| 149 | If we could not allocate the space, return zero. */ | 198 | { |
| 199 | /* Someone else called sbrk(). */ | ||
| 200 | heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP(new); | ||
| 201 | POINTER bloc_start = (POINTER) MEM_ROUNDUP((POINTER)(new_heap + 1)); | ||
| 202 | |||
| 203 | if ((*real_morecore) (bloc_start - new) != new) | ||
| 204 | return 0; | ||
| 205 | |||
| 206 | new_heap->start = new; | ||
| 207 | new_heap->end = bloc_start; | ||
| 208 | new_heap->bloc_start = bloc_start; | ||
| 209 | new_heap->next = NIL_HEAP; | ||
| 210 | new_heap->prev = last_heap; | ||
| 211 | last_heap->next = new_heap; | ||
| 212 | last_heap = new_heap; | ||
| 213 | |||
| 214 | address = bloc_start; | ||
| 215 | already_available = 0; | ||
| 216 | } | ||
| 150 | 217 | ||
| 151 | static POINTER | 218 | /* Get some extra, so we can come here less often. */ |
| 152 | get_more_space (size) | 219 | get = size + extra_bytes - already_available; |
| 153 | SIZE size; | 220 | get = (char *) ROUNDUP((char *)last_heap->end + get) |
| 154 | { | 221 | - (char *) last_heap->end; |
| 155 | POINTER ptr = break_value; | ||
| 156 | if (obtain (size)) | ||
| 157 | return ptr; | ||
| 158 | else | ||
| 159 | return 0; | ||
| 160 | } | ||
| 161 | 222 | ||
| 162 | /* Note that SIZE bytes of space have been relinquished by the process. | 223 | if ((*real_morecore) (get) != last_heap->end) |
| 163 | If SIZE is more than a page, return the space to the system. */ | 224 | return 0; |
| 225 | |||
| 226 | last_heap->end += get; | ||
| 227 | } | ||
| 228 | |||
| 229 | return address; | ||
| 230 | } | ||
| 164 | 231 | ||
| 232 | /* If the last heap has excessive space, return it to the system. */ | ||
| 165 | static void | 233 | static void |
| 166 | relinquish (size) | 234 | relinquish () |
| 167 | SIZE size; | ||
| 168 | { | 235 | { |
| 169 | POINTER new_page_break; | 236 | register heap_ptr h; |
| 170 | int excess; | 237 | int excess = 0; |
| 171 | 238 | ||
| 172 | break_value -= size; | 239 | for (h = last_heap; h && break_value < h->end; h = h->prev) |
| 173 | new_page_break = (POINTER) ROUNDUP (break_value); | 240 | { |
| 174 | excess = (char *) page_break_value - (char *) new_page_break; | 241 | excess += (char *) h->end - (char *) ((break_value < h->bloc_start) |
| 175 | 242 | ? h->bloc_start : break_value); | |
| 176 | if (excess > extra_bytes * 2) | 243 | } |
| 244 | |||
| 245 | if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end) | ||
| 177 | { | 246 | { |
| 178 | /* Keep extra_bytes worth of empty space. | 247 | /* Keep extra_bytes worth of empty space. |
| 179 | And don't free anything unless we can free at least extra_bytes. */ | 248 | And don't free anything unless we can free at least extra_bytes. */ |
| 180 | if ((*real_morecore) (extra_bytes - excess) == 0) | 249 | excess -= extra_bytes; |
| 181 | abort (); | ||
| 182 | 250 | ||
| 183 | page_break_value += extra_bytes - excess; | 251 | if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess) |
| 184 | } | 252 | { |
| 253 | /* Return the last heap with its header to the system */ | ||
| 254 | excess = (char *)last_heap->end - (char *)last_heap->start; | ||
| 255 | last_heap = last_heap->prev; | ||
| 256 | last_heap->next = NIL_HEAP; | ||
| 257 | } | ||
| 258 | else | ||
| 259 | { | ||
| 260 | excess = (char *) last_heap->end | ||
| 261 | - (char *) ROUNDUP((char *)last_heap->end - excess); | ||
| 262 | last_heap->end -= excess; | ||
| 263 | } | ||
| 185 | 264 | ||
| 186 | /* Zero the space from the end of the "official" break to the actual | 265 | if ((*real_morecore) (- excess) == 0) |
| 187 | break, so that bugs show up faster. */ | 266 | abort (); |
| 188 | bzero (break_value, ((char *) page_break_value - (char *) break_value)); | 267 | } |
| 189 | } | 268 | } |
| 190 | 269 | ||
| 191 | /* The meat - allocating, freeing, and relocating blocs. */ | 270 | /* The meat - allocating, freeing, and relocating blocs. */ |
| 192 | 271 | ||
| 193 | /* These structures are allocated in the malloc arena. | ||
| 194 | The linked list is kept in order of increasing '.data' members. | ||
| 195 | The data blocks abut each other; if b->next is non-nil, then | ||
| 196 | b->data + b->size == b->next->data. */ | ||
| 197 | typedef struct bp | ||
| 198 | { | ||
| 199 | struct bp *next; | ||
| 200 | struct bp *prev; | ||
| 201 | POINTER *variable; | ||
| 202 | POINTER data; | ||
| 203 | SIZE size; | ||
| 204 | } *bloc_ptr; | ||
| 205 | |||
| 206 | #define NIL_BLOC ((bloc_ptr) 0) | ||
| 207 | #define BLOC_PTR_SIZE (sizeof (struct bp)) | ||
| 208 | |||
| 209 | /* Head and tail of the list of relocatable blocs. */ | ||
| 210 | static bloc_ptr first_bloc, last_bloc; | ||
| 211 | |||
| 212 | /* Find the bloc referenced by the address in PTR. Returns a pointer | 272 | /* Find the bloc referenced by the address in PTR. Returns a pointer |
| 213 | to that block. */ | 273 | to that block. */ |
| 214 | 274 | ||
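The forward walk at the top of the new obtain() is the heart of the multi-heap scheme: a request that does not fit in the heap containing ADDRESS spills over to the next heap's bloc area, and only when the list is exhausted is the last heap grown through real_morecore. A compile-only sketch of that search, with simplified types and the field names from the patch:

```c
typedef char *POINTER;

struct heap
{
  struct heap *next, *prev;
  POINTER start, end, bloc_start;
};

/* Return the first address at or after ADDRESS where SIZE bytes fit
   inside the heap list starting at HEAP, or 0 if the caller must grow
   the last heap. */
static POINTER
fit_in_heaps (struct heap *heap, POINTER address, unsigned long size)
{
  while (heap && address + size > heap->end)
    {
      heap = heap->next;
      if (heap == 0)
        return 0;                   /* exhausted: extend last_heap via sbrk */
      address = heap->bloc_start;   /* skip past the next heap's header */
    }
  return address;
}
```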
| @@ -240,7 +300,7 @@ get_bloc (size) | |||
| 240 | register bloc_ptr new_bloc; | 300 | register bloc_ptr new_bloc; |
| 241 | 301 | ||
| 242 | if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE)) | 302 | if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE)) |
| 243 | || ! (new_bloc->data = get_more_space (size))) | 303 | || ! (new_bloc->data = obtain (break_value, size))) |
| 244 | { | 304 | { |
| 245 | if (new_bloc) | 305 | if (new_bloc) |
| 246 | free (new_bloc); | 306 | free (new_bloc); |
| @@ -248,9 +308,12 @@ get_bloc (size) | |||
| 248 | return 0; | 308 | return 0; |
| 249 | } | 309 | } |
| 250 | 310 | ||
| 311 | break_value = new_bloc->data + size; | ||
| 312 | |||
| 251 | new_bloc->size = size; | 313 | new_bloc->size = size; |
| 252 | new_bloc->next = NIL_BLOC; | 314 | new_bloc->next = NIL_BLOC; |
| 253 | new_bloc->variable = (POINTER *) NIL; | 315 | new_bloc->variable = (POINTER *) NIL; |
| 316 | new_bloc->new_data = 0; | ||
| 254 | 317 | ||
| 255 | if (first_bloc) | 318 | if (first_bloc) |
| 256 | { | 319 | { |
| @@ -267,36 +330,122 @@ get_bloc (size) | |||
| 267 | return new_bloc; | 330 | return new_bloc; |
| 268 | } | 331 | } |
| 269 | 332 | ||
| 270 | /* Relocate all blocs from BLOC on upward in the list to the zone | 333 | /* Calculate new locations of blocs in the list beginning with BLOC, |
| 271 | indicated by ADDRESS. Direction of relocation is determined by | 334 | whose space starts at ADDRESS in HEAP. If enough space is |
| 272 | the position of ADDRESS relative to BLOC->data. | 335 | not presently available in our reserve, obtain() is called for |
| 336 | more space. | ||
| 337 | |||
| 338 | Do not touch the contents of blocs or break_value. */ | ||
| 273 | 339 | ||
| 274 | If BLOC is NIL_BLOC, nothing is done. | 340 | static int |
| 341 | relocate_blocs (bloc, heap, address) | ||
| 342 | bloc_ptr bloc; | ||
| 343 | heap_ptr heap; | ||
| 344 | POINTER address; | ||
| 345 | { | ||
| 346 | register bloc_ptr b = bloc; | ||
| 275 | 347 | ||
| 276 | Note that ordering of blocs is not affected by this function. */ | 348 | while (b) |
| 349 | { | ||
| 350 | while (heap && address + b->size > heap->end) | ||
| 351 | { | ||
| 352 | heap = heap->next; | ||
| 353 | if (heap == NIL_HEAP) | ||
| 354 | break; | ||
| 355 | address = heap->bloc_start; | ||
| 356 | } | ||
| 277 | 357 | ||
| 278 | static void | 358 | if (heap == NIL_HEAP) |
| 279 | relocate_some_blocs (bloc, address) | 359 | { |
| 280 | bloc_ptr bloc; | 360 | register bloc_ptr tb = b; |
| 281 | POINTER address; | 361 | register SIZE s = 0; |
| 362 | |||
| 363 | while (tb != NIL_BLOC) | ||
| 364 | { | ||
| 365 | s += tb->size; | ||
| 366 | tb = tb->next; | ||
| 367 | } | ||
| 368 | |||
| 369 | if (! (address = obtain(address, s))) | ||
| 370 | return 0; | ||
| 371 | |||
| 372 | heap = last_heap; | ||
| 373 | } | ||
| 374 | |||
| 375 | b->new_data = address; | ||
| 376 | address += b->size; | ||
| 377 | b = b->next; | ||
| 378 | } | ||
| 379 | |||
| 380 | return 1; | ||
| 381 | } | ||
| 382 | |||
| 383 | /* Resize BLOC to SIZE bytes. */ | ||
| 384 | static int | ||
| 385 | resize_bloc (bloc, size) | ||
| 386 | bloc_ptr bloc; | ||
| 387 | SIZE size; | ||
| 282 | { | 388 | { |
| 283 | if (bloc != NIL_BLOC) | 389 | register bloc_ptr b; |
| 390 | heap_ptr heap; | ||
| 391 | POINTER address; | ||
| 392 | SIZE old_size; | ||
| 393 | |||
| 394 | if (bloc == NIL_BLOC || size == bloc->size) | ||
| 395 | return 1; | ||
| 396 | |||
| 397 | for (heap = first_heap; heap != NIL_HEAP; heap = heap->next) | ||
| 398 | { | ||
| 399 | if (heap->bloc_start <= bloc->data && bloc->data <= heap->end) | ||
| 400 | break; | ||
| 401 | } | ||
| 402 | |||
| 403 | if (heap == NIL_HEAP) | ||
| 404 | abort(); | ||
| 405 | |||
| 406 | old_size = bloc->size; | ||
| 407 | bloc->size = size; | ||
| 408 | |||
| 409 | /* Note that the bloc could be moved into the previous heap. */ | ||
| 410 | address = bloc->prev ? bloc->prev->data + bloc->prev->size | ||
| 411 | : first_heap->bloc_start; | ||
| 412 | while (heap) | ||
| 413 | { | ||
| 414 | if (heap->bloc_start <= address && address <= heap->end) | ||
| 415 | break; | ||
| 416 | heap = heap->prev; | ||
| 417 | } | ||
| 418 | |||
| 419 | if (! relocate_blocs (bloc, heap, address)) | ||
| 420 | { | ||
| 421 | bloc->size = old_size; | ||
| 422 | return 0; | ||
| 423 | } | ||
| 424 | |||
| 425 | if (size > old_size) | ||
| 426 | { | ||
| 427 | for (b = last_bloc; b != bloc; b = b->prev) | ||
| 428 | { | ||
| 429 | safe_bcopy (b->data, b->new_data, b->size); | ||
| 430 | *b->variable = b->data = b->new_data; | ||
| 431 | } | ||
| 432 | safe_bcopy (bloc->data, bloc->new_data, old_size); | ||
| 433 | bzero (bloc->new_data + old_size, size - old_size); | ||
| 434 | *bloc->variable = bloc->data = bloc->new_data; | ||
| 435 | } | ||
| 436 | else | ||
| 284 | { | 437 | { |
| 285 | register SIZE offset = address - bloc->data; | ||
| 286 | register SIZE data_size = 0; | ||
| 287 | register bloc_ptr b; | ||
| 288 | |||
| 289 | for (b = bloc; b != NIL_BLOC; b = b->next) | 438 | for (b = bloc; b != NIL_BLOC; b = b->next) |
| 290 | { | 439 | { |
| 291 | data_size += b->size; | 440 | safe_bcopy (b->data, b->new_data, b->size); |
| 292 | b->data += offset; | 441 | *b->variable = b->data = b->new_data; |
| 293 | *b->variable = b->data; | ||
| 294 | } | 442 | } |
| 295 | |||
| 296 | safe_bcopy (address - offset, address, data_size); | ||
| 297 | } | 443 | } |
| 298 | } | ||
| 299 | 444 | ||
| 445 | break_value = last_bloc ? last_bloc->data + last_bloc->size | ||
| 446 | : first_heap->bloc_start; | ||
| 447 | return 1; | ||
| 448 | } | ||
| 300 | 449 | ||
| 301 | /* Free BLOC from the chain of blocs, relocating any blocs above it | 450 | /* Free BLOC from the chain of blocs, relocating any blocs above it |
| 302 | and returning BLOC->size bytes to the free area. */ | 451 | and returning BLOC->size bytes to the free area. */ |
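Worth spelling out the division of labor this hunk establishes: relocate_blocs() only computes each bloc's new_data, while resize_bloc() performs the copies, walking from last_bloc down when the blocs move up and from first_bloc up when they move down, so an overlapping move never clobbers data that has not yet been copied. A sketch of that ordering, assuming safe_bcopy has memmove-style overlap semantics (the real code also updates *b->variable, omitted here):

```c
#include <string.h>

struct bp
{
  struct bp *next, *prev;
  char *data, *new_data;
  unsigned long size;
};

static void
commit_moves (struct bp *first_bloc, struct bp *last_bloc, int moving_up)
{
  struct bp *b;

  if (moving_up)
    for (b = last_bloc; b; b = b->prev)    /* highest bloc first */
      {
        memmove (b->new_data, b->data, b->size);
        b->data = b->new_data;
      }
  else
    for (b = first_bloc; b; b = b->next)   /* lowest bloc first */
      {
        memmove (b->new_data, b->data, b->size);
        b->data = b->new_data;
      }
}
```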
| @@ -305,6 +454,8 @@ static void | |||
| 305 | free_bloc (bloc) | 454 | free_bloc (bloc) |
| 306 | bloc_ptr bloc; | 455 | bloc_ptr bloc; |
| 307 | { | 456 | { |
| 457 | resize_bloc (bloc, 0); | ||
| 458 | |||
| 308 | if (bloc == first_bloc && bloc == last_bloc) | 459 | if (bloc == first_bloc && bloc == last_bloc) |
| 309 | { | 460 | { |
| 310 | first_bloc = last_bloc = NIL_BLOC; | 461 | first_bloc = last_bloc = NIL_BLOC; |
| @@ -325,8 +476,7 @@ free_bloc (bloc) | |||
| 325 | bloc->prev->next = bloc->next; | 476 | bloc->prev->next = bloc->next; |
| 326 | } | 477 | } |
| 327 | 478 | ||
| 328 | relocate_some_blocs (bloc->next, bloc->data); | 479 | relinquish (); |
| 329 | relinquish (bloc->size); | ||
| 330 | free (bloc); | 480 | free (bloc); |
| 331 | } | 481 | } |
| 332 | 482 | ||
| @@ -350,53 +500,125 @@ POINTER | |||
| 350 | r_alloc_sbrk (size) | 500 | r_alloc_sbrk (size) |
| 351 | long size; | 501 | long size; |
| 352 | { | 502 | { |
| 353 | /* This is the first address not currently available for the heap. */ | 503 | register bloc_ptr b; |
| 354 | POINTER top; | 504 | POINTER address; |
| 355 | /* Amount of empty space below that. */ | ||
| 356 | /* It is not correct to use SIZE here, because that is usually unsigned. | ||
| 357 | ptrdiff_t would be okay, but is not always available. | ||
| 358 | `long' will work in all cases, in practice. */ | ||
| 359 | long already_available; | ||
| 360 | POINTER ptr; | ||
| 361 | 505 | ||
| 362 | if (! use_relocatable_buffers) | 506 | if (! use_relocatable_buffers) |
| 363 | return (*real_morecore) (size); | 507 | return (*real_morecore) (size); |
| 364 | 508 | ||
| 365 | top = first_bloc ? first_bloc->data : page_break_value; | 509 | if (size == 0) |
| 366 | already_available = (char *) top - (char *) virtual_break_value; | 510 | return virtual_break_value; |
| 367 | 511 | ||
| 368 | /* Do we not have enough gap already? */ | 512 | if (size > 0) |
| 369 | if (size > 0 && already_available < size) | ||
| 370 | { | 513 | { |
| 371 | /* Get what we need, plus some extra so we can come here less often. */ | 514 | /* Allocate a page-aligned space. GNU malloc would reclaim |
| 372 | SIZE get = size - already_available + extra_bytes; | 515 | extra space if we passed an unaligned one. But we could |
| 516 | not always find a space which is contiguous to the previous. */ | ||
| 517 | POINTER new_bloc_start; | ||
| 518 | heap_ptr h = first_heap; | ||
| 519 | SIZE get = ROUNDUP(size); | ||
| 373 | 520 | ||
| 374 | if (r_alloc_freeze_level > 0 || ! obtain (get)) | 521 | address = (POINTER) ROUNDUP(virtual_break_value); |
| 375 | return 0; | 522 | |
| 523 | /* Search the list upward for a heap which is large enough. */ | ||
| 524 | while ((char *) h->end < (char *) MEM_ROUNDUP((char *)address + get)) | ||
| 525 | { | ||
| 526 | h = h->next; | ||
| 527 | if (h == NIL_HEAP) | ||
| 528 | break; | ||
| 529 | address = (POINTER) ROUNDUP(h->start); | ||
| 530 | } | ||
| 531 | |||
| 532 | /* If not found, obtain more space. */ | ||
| 533 | if (h == NIL_HEAP) | ||
| 534 | { | ||
| 535 | get += extra_bytes + page_size; | ||
| 536 | |||
| 537 | if (r_alloc_freeze_level > 0 || ! obtain(address, get)) | ||
| 538 | return 0; | ||
| 376 | 539 | ||
| 377 | if (first_bloc) | 540 | if (first_heap == last_heap) |
| 378 | relocate_some_blocs (first_bloc, first_bloc->data + get); | 541 | address = (POINTER) ROUNDUP(virtual_break_value); |
| 542 | else | ||
| 543 | address = (POINTER) ROUNDUP(last_heap->start); | ||
| 544 | h = last_heap; | ||
| 545 | } | ||
| 546 | |||
| 547 | new_bloc_start = (POINTER) MEM_ROUNDUP((char *)address + get); | ||
| 548 | |||
| 549 | if (first_heap->bloc_start < new_bloc_start) | ||
| 550 | { | ||
| 551 | /* Move all blocs upward. */ | ||
| 552 | if (r_alloc_freeze_level > 0 | ||
| 553 | || ! relocate_blocs (first_bloc, h, new_bloc_start)) | ||
| 554 | return 0; | ||
| 555 | |||
| 556 | /* Note that (POINTER)(h+1) <= new_bloc_start since | ||
| 557 | get >= page_size, so the following does not destroy the heap | ||
| 558 | header. */ | ||
| 559 | for (b = last_bloc; b != NIL_BLOC; b = b->prev) | ||
| 560 | { | ||
| 561 | safe_bcopy (b->data, b->new_data, b->size); | ||
| 562 | *b->variable = b->data = b->new_data; | ||
| 563 | } | ||
| 564 | |||
| 565 | h->bloc_start = new_bloc_start; | ||
| 566 | } | ||
| 379 | 567 | ||
| 380 | /* Zero out the space we just allocated, to help catch bugs | 568 | if (h != first_heap) |
| 381 | quickly. */ | 569 | { |
| 382 | bzero (virtual_break_value, get); | 570 | /* Give up managing heaps below the one the new |
| 571 | virtual_break_value points to. */ | ||
| 572 | first_heap->prev = NIL_HEAP; | ||
| 573 | first_heap->next = h->next; | ||
| 574 | first_heap->start = h->start; | ||
| 575 | first_heap->end = h->end; | ||
| 576 | first_heap->bloc_start = h->bloc_start; | ||
| 577 | |||
| 578 | if (first_heap->next) | ||
| 579 | first_heap->next->prev = first_heap; | ||
| 580 | else | ||
| 581 | last_heap = first_heap; | ||
| 582 | } | ||
| 583 | |||
| 584 | bzero (address, size); | ||
| 383 | } | 585 | } |
| 384 | /* Can we keep extra_bytes of gap while freeing at least extra_bytes? */ | 586 | else /* size < 0 */ |
| 385 | else if (size < 0 && already_available - size > 2 * extra_bytes | ||
| 386 | && r_alloc_freeze_level == 0) | ||
| 387 | { | 587 | { |
| 388 | /* Ok, do so. This is how many to free. */ | 588 | SIZE excess = (char *)first_heap->bloc_start |
| 389 | SIZE give_back = already_available - size - extra_bytes; | 589 | - ((char *)virtual_break_value + size); |
| 590 | |||
| 591 | address = virtual_break_value; | ||
| 592 | |||
| 593 | if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes) | ||
| 594 | { | ||
| 595 | excess -= extra_bytes; | ||
| 596 | first_heap->bloc_start | ||
| 597 | = (POINTER) MEM_ROUNDUP((char *)first_heap->bloc_start - excess); | ||
| 598 | |||
| 599 | relocate_blocs(first_bloc, first_heap, first_heap->bloc_start); | ||
| 390 | 600 | ||
| 391 | if (first_bloc) | 601 | for (b = first_bloc; b != NIL_BLOC; b = b->next) |
| 392 | relocate_some_blocs (first_bloc, first_bloc->data - give_back); | 602 | { |
| 393 | relinquish (give_back); | 603 | safe_bcopy (b->data, b->new_data, b->size); |
| 604 | *b->variable = b->data = b->new_data; | ||
| 605 | } | ||
| 606 | } | ||
| 607 | |||
| 608 | if ((char *)virtual_break_value + size < (char *)first_heap->start) | ||
| 609 | { | ||
| 610 | /* We found additional space below the first heap. */ | ||
| 611 | first_heap->start = (POINTER) ((char *)virtual_break_value + size); | ||
| 612 | } | ||
| 394 | } | 613 | } |
| 395 | 614 | ||
| 396 | ptr = virtual_break_value; | 615 | virtual_break_value = (POINTER) ((char *)address + size); |
| 397 | virtual_break_value += size; | 616 | break_value = last_bloc ? last_bloc->data + last_bloc->size |
| 617 | : first_heap->bloc_start; | ||
| 618 | if (size < 0) | ||
| 619 | relinquish(); | ||
| 398 | 620 | ||
| 399 | return ptr; | 621 | return address; |
| 400 | } | 622 | } |
| 401 | 623 | ||
| 402 | /* Allocate a relocatable bloc of storage of size SIZE. A pointer to | 624 | /* Allocate a relocatable bloc of storage of size SIZE. A pointer to |
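Note the two granularities r_alloc_sbrk juggles: the sbrk-visible break is page-aligned via ROUNDUP, while bloc starts only need MEM_ALIGN via MEM_ROUNDUP. A self-contained check of both macros (assumes a host with 4096-byte pages and an 8-byte double, as was typical):

```c
#include <stdio.h>

static unsigned long page_size = 4096;

#define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
                       & ~(page_size - 1))
#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((unsigned long int) (addr) + MEM_ALIGN - 1) \
                           & ~(MEM_ALIGN - 1))

int
main (void)
{
  printf ("ROUNDUP (1)      = %lu\n", (unsigned long) ROUNDUP (1));      /* 4096 */
  printf ("MEM_ROUNDUP (9)  = %lu\n", (unsigned long) MEM_ROUNDUP (9));  /* 16 */
  printf ("MEM_ROUNDUP (16) = %lu\n", (unsigned long) MEM_ROUNDUP (16)); /* 16 */
  return 0;
}
```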
| @@ -416,7 +638,7 @@ r_alloc (ptr, size) | |||
| 416 | if (! r_alloc_initialized) | 638 | if (! r_alloc_initialized) |
| 417 | r_alloc_init (); | 639 | r_alloc_init (); |
| 418 | 640 | ||
| 419 | new_bloc = get_bloc (size); | 641 | new_bloc = get_bloc (MEM_ROUNDUP(size)); |
| 420 | if (new_bloc) | 642 | if (new_bloc) |
| 421 | { | 643 | { |
| 422 | new_bloc->variable = ptr; | 644 | new_bloc->variable = ptr; |
| @@ -470,17 +692,9 @@ r_re_alloc (ptr, size) | |||
| 470 | /* Wouldn't it be useful to actually resize the bloc here? */ | 692 | /* Wouldn't it be useful to actually resize the bloc here? */ |
| 471 | return *ptr; | 693 | return *ptr; |
| 472 | 694 | ||
| 473 | if (! obtain (size - bloc->size)) | 695 | if (! resize_bloc (bloc, MEM_ROUNDUP(size))) |
| 474 | return 0; | 696 | return 0; |
| 475 | 697 | ||
| 476 | relocate_some_blocs (bloc->next, bloc->data + size); | ||
| 477 | |||
| 478 | /* Zero out the new space in the bloc, to help catch bugs faster. */ | ||
| 479 | bzero (bloc->data + bloc->size, size - bloc->size); | ||
| 480 | |||
| 481 | /* Indicate that this block has a new size. */ | ||
| 482 | bloc->size = size; | ||
| 483 | |||
| 484 | return *ptr; | 698 | return *ptr; |
| 485 | } | 699 | } |
| 486 | 700 | ||
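The three entry points share one contract: the caller hands in the address of its pointer variable, which is stored in bloc->variable, and the allocator rewrites it whenever relocation moves the data. A hypothetical caller (declarations assumed to match ralloc.c, where POINTER is char *; K&R externs as in the file):

```c
typedef char *POINTER;

extern POINTER r_alloc ();      /* (POINTER *ptr, SIZE size) */
extern POINTER r_re_alloc ();   /* (POINTER *ptr, SIZE size) */
extern void r_alloc_free ();    /* (POINTER *ptr) */

static POINTER text;

static void
example ()
{
  r_alloc (&text, 64 * 1024L);      /* TEXT now points at the bloc data */
  r_re_alloc (&text, 128 * 1024L);  /* may relocate; TEXT is kept current */
  r_alloc_free (&text);             /* TEXT must not be used afterwards */
}
```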
| @@ -519,6 +733,9 @@ extern POINTER (*__morecore) (); | |||
| 519 | static void | 733 | static void |
| 520 | r_alloc_init () | 734 | r_alloc_init () |
| 521 | { | 735 | { |
| 736 | static struct heap heap_base; | ||
| 737 | POINTER end; | ||
| 738 | |||
| 522 | if (r_alloc_initialized) | 739 | if (r_alloc_initialized) |
| 523 | return; | 740 | return; |
| 524 | 741 | ||
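The next hunk keeps the hook swap that makes all of this transparent to GNU malloc: r_alloc_init saves __morecore and installs r_alloc_sbrk in its place, so every core request malloc makes flows through the relocating allocator. A compile-only sketch of that interposition (K&R-style declarations as in the patch; __morecore is the GNU malloc hook of that era):

```c
typedef char *POINTER;

extern POINTER (*__morecore) ();     /* how GNU malloc obtains core */
extern POINTER r_alloc_sbrk ();

static POINTER (*real_morecore) ();  /* the saved system allocator */

static void
install_hook ()
{
  real_morecore = __morecore;   /* remember the real sbrk-based routine */
  __morecore = r_alloc_sbrk;    /* malloc's requests now come to us */
}
```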
| @@ -526,14 +743,17 @@ r_alloc_init () | |||
| 526 | real_morecore = __morecore; | 743 | real_morecore = __morecore; |
| 527 | __morecore = r_alloc_sbrk; | 744 | __morecore = r_alloc_sbrk; |
| 528 | 745 | ||
| 529 | virtual_break_value = break_value = (*real_morecore) (0); | 746 | first_heap = last_heap = &heap_base; |
| 747 | first_heap->next = first_heap->prev = NIL_HEAP; | ||
| 748 | first_heap->start = first_heap->bloc_start | ||
| 749 | = virtual_break_value = break_value = (*real_morecore) (0); | ||
| 530 | if (break_value == NIL) | 750 | if (break_value == NIL) |
| 531 | abort (); | 751 | abort (); |
| 532 | 752 | ||
| 533 | page_size = PAGE; | 753 | page_size = PAGE; |
| 534 | extra_bytes = ROUNDUP (50000); | 754 | extra_bytes = ROUNDUP (50000); |
| 535 | 755 | ||
| 536 | page_break_value = (POINTER) ROUNDUP (break_value); | 756 | first_heap->end = (POINTER) ROUNDUP (first_heap->start); |
| 537 | 757 | ||
| 538 | /* The extra call to real_morecore guarantees that the end of the | 758 | /* The extra call to real_morecore guarantees that the end of the |
| 539 | address space is a multiple of page_size, even if page_size is | 759 | address space is a multiple of page_size, even if page_size is |
| @@ -541,13 +761,100 @@ r_alloc_init () | |||
| 541 | which page_size is stored. This allows a binary to be built on a | 761 | which page_size is stored. This allows a binary to be built on a |
| 542 | system with one page size and run on a system with a smaller page | 762 | system with one page size and run on a system with a smaller page |
| 543 | size. */ | 763 | size. */ |
| 544 | (*real_morecore) (page_break_value - break_value); | 764 | (*real_morecore) (first_heap->end - first_heap->start); |
| 545 | 765 | ||
| 546 | /* Clear the rest of the last page; this memory is in our address space | 766 | /* Clear the rest of the last page; this memory is in our address space |
| 547 | even though it is after the sbrk value. */ | 767 | even though it is after the sbrk value. */ |
| 548 | /* Doubly true, with the additional call that explicitly adds the | 768 | /* Doubly true, with the additional call that explicitly adds the |
| 549 | rest of that page to the address space. */ | 769 | rest of that page to the address space. */ |
| 550 | bzero (break_value, (page_break_value - break_value)); | 770 | bzero (first_heap->start, first_heap->end - first_heap->start); |
| 551 | virtual_break_value = break_value = page_break_value; | 771 | virtual_break_value = break_value = first_heap->bloc_start = first_heap->end; |
| 552 | use_relocatable_buffers = 1; | 772 | use_relocatable_buffers = 1; |
| 553 | } | 773 | } |
| 774 | #ifdef DEBUG | ||
| 775 | #include <assert.h> | ||
| 776 | |||
| 777 | void | ||
| 778 | r_alloc_check () | ||
| 779 | { | ||
| 780 | int found = 0; | ||
| 781 | heap_ptr h, ph = 0; | ||
| 782 | bloc_ptr b, pb = 0; | ||
| 783 | |||
| 784 | if (!r_alloc_initialized) | ||
| 785 | return; | ||
| 786 | |||
| 787 | assert(first_heap); | ||
| 788 | assert(last_heap->end <= (POINTER) sbrk(0)); | ||
| 789 | assert((POINTER) first_heap < first_heap->start); | ||
| 790 | assert(first_heap->start <= virtual_break_value); | ||
| 791 | assert(virtual_break_value <= first_heap->end); | ||
| 792 | |||
| 793 | for (h = first_heap; h; h = h->next) | ||
| 794 | { | ||
| 795 | assert(h->prev == ph); | ||
| 796 | assert((POINTER) ROUNDUP(h->end) == h->end); | ||
| 797 | assert((POINTER) MEM_ROUNDUP(h->start) == h->start); | ||
| 798 | assert((POINTER) MEM_ROUNDUP(h->bloc_start) == h->bloc_start); | ||
| 799 | assert(h->start <= h->bloc_start && h->bloc_start <= h->end); | ||
| 800 | |||
| 801 | if (ph) | ||
| 802 | { | ||
| 803 | assert (ph->end < h->start); | ||
| 804 | assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start); | ||
| 805 | } | ||
| 806 | |||
| 807 | if (h->bloc_start <= break_value && break_value <= h->end) | ||
| 808 | found = 1; | ||
| 809 | |||
| 810 | ph = h; | ||
| 811 | } | ||
| 812 | |||
| 813 | assert(found); | ||
| 814 | assert(last_heap == ph); | ||
| 815 | |||
| 816 | for (b = first_bloc; b; b = b->next) | ||
| 817 | { | ||
| 818 | assert(b->prev == pb); | ||
| 819 | assert((POINTER) MEM_ROUNDUP(b->data) == b->data); | ||
| 820 | assert((SIZE) MEM_ROUNDUP(b->size) == b->size); | ||
| 821 | |||
| 822 | ph = 0; | ||
| 823 | for (h = first_heap; h; h = h->next) | ||
| 824 | { | ||
| 825 | if (h->bloc_start <= b->data && b->data + b->size <= h->end) | ||
| 826 | break; | ||
| 827 | ph = h; | ||
| 828 | } | ||
| 829 | |||
| 830 | assert(h); | ||
| 831 | |||
| 832 | if (pb && pb->data + pb->size != b->data) | ||
| 833 | { | ||
| 834 | assert(ph && b->data == h->bloc_start); | ||
| 835 | while (ph) | ||
| 836 | { | ||
| 837 | if (ph->bloc_start <= pb->data | ||
| 838 | && pb->data + pb->size <= ph->end) | ||
| 839 | { | ||
| 840 | assert(pb->data + pb->size + b->size > ph->end); | ||
| 841 | break; | ||
| 842 | } | ||
| 843 | else | ||
| 844 | { | ||
| 845 | assert(ph->bloc_start + b->size > ph->end); | ||
| 846 | } | ||
| 847 | ph = ph->prev; | ||
| 848 | } | ||
| 849 | } | ||
| 850 | pb = b; | ||
| 851 | } | ||
| 852 | |||
| 853 | assert(last_bloc == pb); | ||
| 854 | |||
| 855 | if (last_bloc) | ||
| 856 | assert(last_bloc->data + last_bloc->size == break_value); | ||
| 857 | else | ||
| 858 | assert(first_heap->bloc_start == break_value); | ||
| 859 | } | ||
| 860 | #endif /* DEBUG */ | ||
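The consistency checker is compiled only under DEBUG. A hypothetical harness for exercising it; the -DDEBUG build flag and the call site are my assumption, not part of the patch:

```c
/* Build ralloc.c with -DDEBUG, then call the checker wherever the
   heap/bloc invariants should hold. */
#ifdef DEBUG
extern void r_alloc_check ();
#endif

static void
after_relocation ()
{
#ifdef DEBUG
  r_alloc_check ();   /* verify heap ordering and bloc abutment */
#endif
}
```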