about summary refs log tree commit diff stats
path: root/src
diff options
context:
space:
mode:
author YAMAMOTO Mitsuharu 2007-06-26 03:28:17 +0000
committer YAMAMOTO Mitsuharu 2007-06-26 03:28:17 +0000
commit 8d0d84d25d4e89ff458788d4e3f97212495a7359 (patch)
tree b86660baf6797a5fbc62ccd8523d7d7b53150e3f /src
parent 8b0b7849d3644f52e67b96c53651b190c1b940cf (diff)
download emacs-8d0d84d25d4e89ff458788d4e3f97212495a7359.tar.gz
emacs-8d0d84d25d4e89ff458788d4e3f97212495a7359.zip
[HAVE_GTK_AND_PTHREAD] Check this after including config.h.
(_aligned_blocks_mutex) [USE_PTHREAD]: New variable. (LOCK_ALIGNED_BLOCKS, UNLOCK_ALIGNED_BLOCKS): New macros. (_free_internal, memalign): Use them. (_malloc_mutex, _aligned_blocks_mutex) [USE_PTHREAD]: Initialize to PTHREAD_MUTEX_INITIALIZER. (malloc_initialize_1) [USE_PTHREAD]: Don't use recursive mutex. (morecore_nolock): Rename from morecore. All uses changed. Use only nolock versions of internal allocation functions. (_malloc_internal_nolock, _realloc_internal_nolock) (_free_internal_nolock): New functions created from _malloc_internal, _realloc_internal, and _free_internal. (_malloc_internal, _realloc_internal, _free_internal): Use them. Copy hook value to automatic variable before its use. (memalign): Copy hook value to automatic variable before its use.
Diffstat (limited to 'src')
-rw-r--r--src/ChangeLog18
-rw-r--r--src/gmalloc.c175
2 files changed, 141 insertions(+), 52 deletions(-)
diff --git a/src/ChangeLog b/src/ChangeLog
index ed8eee83268..024c019c4c0 100644
--- a/src/ChangeLog
+++ b/src/ChangeLog
@@ -1,3 +1,21 @@
12007-06-26 YAMAMOTO Mitsuharu <mituharu@math.s.chiba-u.ac.jp>
2
3 * gmalloc.c [HAVE_GTK_AND_PTHREAD]: Check this after including config.h.
4 (_aligned_blocks_mutex) [USE_PTHREAD]: New variable.
5 (LOCK_ALIGNED_BLOCKS, UNLOCK_ALIGNED_BLOCKS): New macros.
6 (_free_internal, memalign): Use them.
7 (_malloc_mutex, _aligned_blocks_mutex) [USE_PTHREAD]:
8 Initialize to PTHREAD_MUTEX_INITIALIZER.
9 (malloc_initialize_1) [USE_PTHREAD]: Don't use recursive mutex.
10 (morecore_nolock): Rename from morecore. All uses changed.
11 Use only nolock versions of internal allocation functions.
12 (_malloc_internal_nolock, _realloc_internal_nolock)
13 (_free_internal_nolock): New functions created from
14 _malloc_internal, _realloc_internal, and _free_internal.
15 (_malloc_internal, _realloc_internal, _free_internal): Use them.
16 Copy hook value to automatic variable before its use.
17 (memalign): Copy hook value to automatic variable before its use.
18
12007-06-26 Kenichi Handa <handa@m17n.org> 192007-06-26 Kenichi Handa <handa@m17n.org>
2 20
3 * coding.c (Ffind_operation_coding_system): Docstring improved. 21 * coding.c (Ffind_operation_coding_system): Docstring improved.
diff --git a/src/gmalloc.c b/src/gmalloc.c
index 76845828d75..fcd9f655321 100644
--- a/src/gmalloc.c
+++ b/src/gmalloc.c
@@ -1,9 +1,6 @@
1/* This file is no longer automatically generated from libc. */ 1/* This file is no longer automatically generated from libc. */
2 2
3#define _MALLOC_INTERNAL 3#define _MALLOC_INTERNAL
4#ifdef HAVE_GTK_AND_PTHREAD
5#define USE_PTHREAD
6#endif
7 4
8/* The malloc headers and source files from the C library follow here. */ 5/* The malloc headers and source files from the C library follow here. */
9 6
@@ -40,6 +37,10 @@ Fifth Floor, Boston, MA 02110-1301, USA.
40#include <config.h> 37#include <config.h>
41#endif 38#endif
42 39
40#ifdef HAVE_GTK_AND_PTHREAD
41#define USE_PTHREAD
42#endif
43
43#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \ 44#if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
44 || defined STDC_HEADERS || defined PROTOTYPES) \ 45 || defined STDC_HEADERS || defined PROTOTYPES) \
45 && ! defined (BROKEN_PROTOTYPES)) 46 && ! defined (BROKEN_PROTOTYPES))
@@ -235,14 +236,21 @@ extern __malloc_size_t _bytes_free;
235extern __ptr_t _malloc_internal PP ((__malloc_size_t __size)); 236extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
236extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size)); 237extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
237extern void _free_internal PP ((__ptr_t __ptr)); 238extern void _free_internal PP ((__ptr_t __ptr));
239extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
240extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
241extern void _free_internal_nolock PP ((__ptr_t __ptr));
238 242
239#ifdef USE_PTHREAD 243#ifdef USE_PTHREAD
240extern pthread_mutex_t _malloc_mutex; 244extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
241#define LOCK() pthread_mutex_lock (&_malloc_mutex) 245#define LOCK() pthread_mutex_lock (&_malloc_mutex)
242#define UNLOCK() pthread_mutex_unlock (&_malloc_mutex) 246#define UNLOCK() pthread_mutex_unlock (&_malloc_mutex)
247#define LOCK_ALIGNED_BLOCKS() pthread_mutex_lock (&_aligned_blocks_mutex)
248#define UNLOCK_ALIGNED_BLOCKS() pthread_mutex_unlock (&_aligned_blocks_mutex)
243#else 249#else
244#define LOCK() 250#define LOCK()
245#define UNLOCK() 251#define UNLOCK()
252#define LOCK_ALIGNED_BLOCKS()
253#define UNLOCK_ALIGNED_BLOCKS()
246#endif 254#endif
247 255
248#endif /* _MALLOC_INTERNAL. */ 256#endif /* _MALLOC_INTERNAL. */
@@ -554,7 +562,8 @@ register_heapinfo ()
554 562
555#ifdef USE_PTHREAD 563#ifdef USE_PTHREAD
556static pthread_once_t malloc_init_once_control = PTHREAD_ONCE_INIT; 564static pthread_once_t malloc_init_once_control = PTHREAD_ONCE_INIT;
557pthread_mutex_t _malloc_mutex; 565pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
566pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
558#endif 567#endif
559 568
560static void 569static void
@@ -567,7 +576,9 @@ malloc_initialize_1 ()
567 if (__malloc_initialize_hook) 576 if (__malloc_initialize_hook)
568 (*__malloc_initialize_hook) (); 577 (*__malloc_initialize_hook) ();
569 578
570#ifdef USE_PTHREAD 579 /* We don't use recursive mutex because pthread_mutexattr_init may
580 call malloc internally. */
581#if 0 /* defined (USE_PTHREAD) */
571 { 582 {
572 pthread_mutexattr_t attr; 583 pthread_mutexattr_t attr;
573 584
@@ -616,9 +627,9 @@ static int morecore_recursing;
616 627
617/* Get neatly aligned memory, initializing or 628/* Get neatly aligned memory, initializing or
618 growing the heap info table as necessary. */ 629 growing the heap info table as necessary. */
619static __ptr_t morecore PP ((__malloc_size_t)); 630static __ptr_t morecore_nolock PP ((__malloc_size_t));
620static __ptr_t 631static __ptr_t
621morecore (size) 632morecore_nolock (size)
622 __malloc_size_t size; 633 __malloc_size_t size;
623{ 634{
624 __ptr_t result; 635 __ptr_t result;
@@ -661,7 +672,7 @@ morecore (size)
661 `morecore_recursing' flag and return null. */ 672 `morecore_recursing' flag and return null. */
662 int save = errno; /* Don't want to clobber errno with ENOMEM. */ 673 int save = errno; /* Don't want to clobber errno with ENOMEM. */
663 morecore_recursing = 1; 674 morecore_recursing = 1;
664 newinfo = (malloc_info *) _realloc_internal 675 newinfo = (malloc_info *) _realloc_internal_nolock
665 (_heapinfo, newsize * sizeof (malloc_info)); 676 (_heapinfo, newsize * sizeof (malloc_info));
666 morecore_recursing = 0; 677 morecore_recursing = 0;
667 if (newinfo == NULL) 678 if (newinfo == NULL)
@@ -717,7 +728,7 @@ morecore (size)
717 /* Reset _heaplimit so _free_internal never decides 728 /* Reset _heaplimit so _free_internal never decides
718 it can relocate or resize the info table. */ 729 it can relocate or resize the info table. */
719 _heaplimit = 0; 730 _heaplimit = 0;
720 _free_internal (oldinfo); 731 _free_internal_nolock (oldinfo);
721 PROTECT_MALLOC_STATE (0); 732 PROTECT_MALLOC_STATE (0);
722 733
723 /* The new heap limit includes the new table just allocated. */ 734 /* The new heap limit includes the new table just allocated. */
@@ -732,7 +743,7 @@ morecore (size)
732 743
733/* Allocate memory from the heap. */ 744/* Allocate memory from the heap. */
734__ptr_t 745__ptr_t
735_malloc_internal (size) 746_malloc_internal_nolock (size)
736 __malloc_size_t size; 747 __malloc_size_t size;
737{ 748{
738 __ptr_t result; 749 __ptr_t result;
@@ -752,7 +763,6 @@ _malloc_internal (size)
752 return NULL; 763 return NULL;
753#endif 764#endif
754 765
755 LOCK ();
756 PROTECT_MALLOC_STATE (0); 766 PROTECT_MALLOC_STATE (0);
757 767
758 if (size < sizeof (struct list)) 768 if (size < sizeof (struct list))
@@ -802,8 +812,10 @@ _malloc_internal (size)
802 /* No free fragments of the desired size, so get a new block 812 /* No free fragments of the desired size, so get a new block
803 and break it into fragments, returning the first. */ 813 and break it into fragments, returning the first. */
804#ifdef GC_MALLOC_CHECK 814#ifdef GC_MALLOC_CHECK
805 result = _malloc_internal (BLOCKSIZE); 815 result = _malloc_internal_nolock (BLOCKSIZE);
806 PROTECT_MALLOC_STATE (0); 816 PROTECT_MALLOC_STATE (0);
817#elif defined (USE_PTHREAD)
818 result = _malloc_internal_nolock (BLOCKSIZE);
807#else 819#else
808 result = malloc (BLOCKSIZE); 820 result = malloc (BLOCKSIZE);
809#endif 821#endif
@@ -874,7 +886,7 @@ _malloc_internal (size)
874 _heaplimit += wantblocks - lastblocks; 886 _heaplimit += wantblocks - lastblocks;
875 continue; 887 continue;
876 } 888 }
877 result = morecore (wantblocks * BLOCKSIZE); 889 result = morecore_nolock (wantblocks * BLOCKSIZE);
878 if (result == NULL) 890 if (result == NULL)
879 goto out; 891 goto out;
880 block = BLOCK (result); 892 block = BLOCK (result);
@@ -932,7 +944,19 @@ _malloc_internal (size)
932 944
933 PROTECT_MALLOC_STATE (1); 945 PROTECT_MALLOC_STATE (1);
934 out: 946 out:
947 return result;
948}
949
950__ptr_t
951_malloc_internal (size)
952 __malloc_size_t size;
953{
954 __ptr_t result;
955
956 LOCK ();
957 result = _malloc_internal_nolock (size);
935 UNLOCK (); 958 UNLOCK ();
959
936 return result; 960 return result;
937} 961}
938 962
@@ -940,10 +964,21 @@ __ptr_t
940malloc (size) 964malloc (size)
941 __malloc_size_t size; 965 __malloc_size_t size;
942{ 966{
967 __ptr_t (*hook) (__malloc_size_t);
968
943 if (!__malloc_initialized && !__malloc_initialize ()) 969 if (!__malloc_initialized && !__malloc_initialize ())
944 return NULL; 970 return NULL;
945 971
946 return (__malloc_hook != NULL ? *__malloc_hook : _malloc_internal) (size); 972 /* Copy the value of __malloc_hook to an automatic variable in case
973 __malloc_hook is modified in another thread between its
974 NULL-check and the use.
975
976 Note: Strictly speaking, this is not a right solution. We should
977 use mutexes to access non-read-only variables that are shared
978 among multiple threads. We just leave it for compatibility with
979 glibc malloc (i.e., assignments to __malloc_hook) for now. */
980 hook = __malloc_hook;
981 return (hook != NULL ? *hook : _malloc_internal) (size);
947} 982}
948 983
949#ifndef _LIBC 984#ifndef _LIBC
@@ -1024,9 +1059,9 @@ void (*__free_hook) PP ((__ptr_t __ptr));
1024struct alignlist *_aligned_blocks = NULL; 1059struct alignlist *_aligned_blocks = NULL;
1025 1060
1026/* Return memory to the heap. 1061/* Return memory to the heap.
1027 Like `free' but don't call a __free_hook if there is one. */ 1062 Like `_free_internal' but don't lock mutex. */
1028void 1063void
1029_free_internal (ptr) 1064_free_internal_nolock (ptr)
1030 __ptr_t ptr; 1065 __ptr_t ptr;
1031{ 1066{
1032 int type; 1067 int type;
@@ -1043,9 +1078,9 @@ _free_internal (ptr)
1043 if (ptr == NULL) 1078 if (ptr == NULL)
1044 return; 1079 return;
1045 1080
1046 LOCK ();
1047 PROTECT_MALLOC_STATE (0); 1081 PROTECT_MALLOC_STATE (0);
1048 1082
1083 LOCK_ALIGNED_BLOCKS ();
1049 for (l = _aligned_blocks; l != NULL; l = l->next) 1084 for (l = _aligned_blocks; l != NULL; l = l->next)
1050 if (l->aligned == ptr) 1085 if (l->aligned == ptr)
1051 { 1086 {
@@ -1053,6 +1088,7 @@ _free_internal (ptr)
1053 ptr = l->exact; 1088 ptr = l->exact;
1054 break; 1089 break;
1055 } 1090 }
1091 UNLOCK_ALIGNED_BLOCKS ();
1056 1092
1057 block = BLOCK (ptr); 1093 block = BLOCK (ptr);
1058 1094
@@ -1158,7 +1194,7 @@ _free_internal (ptr)
1158 table's blocks to the system before we have copied them to 1194 table's blocks to the system before we have copied them to
1159 the new location. */ 1195 the new location. */
1160 _heaplimit = 0; 1196 _heaplimit = 0;
1161 _free_internal (_heapinfo); 1197 _free_internal_nolock (_heapinfo);
1162 _heaplimit = oldlimit; 1198 _heaplimit = oldlimit;
1163 1199
1164 /* Tell malloc to search from the beginning of the heap for 1200 /* Tell malloc to search from the beginning of the heap for
@@ -1166,8 +1202,8 @@ _free_internal (ptr)
1166 _heapindex = 0; 1202 _heapindex = 0;
1167 1203
1168 /* Allocate new space for the info table and move its data. */ 1204 /* Allocate new space for the info table and move its data. */
1169 newinfo = (malloc_info *) _malloc_internal (info_blocks 1205 newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
1170 * BLOCKSIZE); 1206 * BLOCKSIZE);
1171 PROTECT_MALLOC_STATE (0); 1207 PROTECT_MALLOC_STATE (0);
1172 memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE); 1208 memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
1173 _heapinfo = newinfo; 1209 _heapinfo = newinfo;
@@ -1230,8 +1266,8 @@ _free_internal (ptr)
1230 _chunks_free -= BLOCKSIZE >> type; 1266 _chunks_free -= BLOCKSIZE >> type;
1231 _bytes_free -= BLOCKSIZE; 1267 _bytes_free -= BLOCKSIZE;
1232 1268
1233#ifdef GC_MALLOC_CHECK 1269#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
1234 _free_internal (ADDRESS (block)); 1270 _free_internal_nolock (ADDRESS (block));
1235#else 1271#else
1236 free (ADDRESS (block)); 1272 free (ADDRESS (block));
1237#endif 1273#endif
@@ -1269,6 +1305,16 @@ _free_internal (ptr)
1269 } 1305 }
1270 1306
1271 PROTECT_MALLOC_STATE (1); 1307 PROTECT_MALLOC_STATE (1);
1308}
1309
1310/* Return memory to the heap.
1311 Like `free' but don't call a __free_hook if there is one. */
1312void
1313_free_internal (ptr)
1314 __ptr_t ptr;
1315{
1316 LOCK ();
1317 _free_internal_nolock (ptr);
1272 UNLOCK (); 1318 UNLOCK ();
1273} 1319}
1274 1320
@@ -1278,8 +1324,10 @@ FREE_RETURN_TYPE
1278free (ptr) 1324free (ptr)
1279 __ptr_t ptr; 1325 __ptr_t ptr;
1280{ 1326{
1281 if (__free_hook != NULL) 1327 void (*hook) (__ptr_t) = __free_hook;
1282 (*__free_hook) (ptr); 1328
1329 if (hook != NULL)
1330 (*hook) (ptr);
1283 else 1331 else
1284 _free_internal (ptr); 1332 _free_internal (ptr);
1285} 1333}
@@ -1415,7 +1463,7 @@ __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));
1415 new region. This module has incestuous knowledge of the 1463 new region. This module has incestuous knowledge of the
1416 internals of both free and malloc. */ 1464 internals of both free and malloc. */
1417__ptr_t 1465__ptr_t
1418_realloc_internal (ptr, size) 1466_realloc_internal_nolock (ptr, size)
1419 __ptr_t ptr; 1467 __ptr_t ptr;
1420 __malloc_size_t size; 1468 __malloc_size_t size;
1421{ 1469{
@@ -1425,15 +1473,14 @@ _realloc_internal (ptr, size)
1425 1473
1426 if (size == 0) 1474 if (size == 0)
1427 { 1475 {
1428 _free_internal (ptr); 1476 _free_internal_nolock (ptr);
1429 return _malloc_internal (0); 1477 return _malloc_internal_nolock (0);
1430 } 1478 }
1431 else if (ptr == NULL) 1479 else if (ptr == NULL)
1432 return _malloc_internal (size); 1480 return _malloc_internal_nolock (size);
1433 1481
1434 block = BLOCK (ptr); 1482 block = BLOCK (ptr);
1435 1483
1436 LOCK ();
1437 PROTECT_MALLOC_STATE (0); 1484 PROTECT_MALLOC_STATE (0);
1438 1485
1439 type = _heapinfo[block].busy.type; 1486 type = _heapinfo[block].busy.type;
@@ -1443,11 +1490,11 @@ _realloc_internal (ptr, size)
1443 /* Maybe reallocate a large block to a small fragment. */ 1490 /* Maybe reallocate a large block to a small fragment. */
1444 if (size <= BLOCKSIZE / 2) 1491 if (size <= BLOCKSIZE / 2)
1445 { 1492 {
1446 result = _malloc_internal (size); 1493 result = _malloc_internal_nolock (size);
1447 if (result != NULL) 1494 if (result != NULL)
1448 { 1495 {
1449 memcpy (result, ptr, size); 1496 memcpy (result, ptr, size);
1450 _free_internal (ptr); 1497 _free_internal_nolock (ptr);
1451 goto out; 1498 goto out;
1452 } 1499 }
1453 } 1500 }
@@ -1467,7 +1514,7 @@ _realloc_internal (ptr, size)
1467 Now we will free this chunk; increment the statistics counter 1514 Now we will free this chunk; increment the statistics counter
1468 so it doesn't become wrong when _free_internal decrements it. */ 1515 so it doesn't become wrong when _free_internal decrements it. */
1469 ++_chunks_used; 1516 ++_chunks_used;
1470 _free_internal (ADDRESS (block + blocks)); 1517 _free_internal_nolock (ADDRESS (block + blocks));
1471 result = ptr; 1518 result = ptr;
1472 } 1519 }
1473 else if (blocks == _heapinfo[block].busy.info.size) 1520 else if (blocks == _heapinfo[block].busy.info.size)
@@ -1482,8 +1529,8 @@ _realloc_internal (ptr, size)
1482 /* Prevent free from actually returning memory to the system. */ 1529 /* Prevent free from actually returning memory to the system. */
1483 oldlimit = _heaplimit; 1530 oldlimit = _heaplimit;
1484 _heaplimit = 0; 1531 _heaplimit = 0;
1485 _free_internal (ptr); 1532 _free_internal_nolock (ptr);
1486 result = _malloc_internal (size); 1533 result = _malloc_internal_nolock (size);
1487 PROTECT_MALLOC_STATE (0); 1534 PROTECT_MALLOC_STATE (0);
1488 if (_heaplimit == 0) 1535 if (_heaplimit == 0)
1489 _heaplimit = oldlimit; 1536 _heaplimit = oldlimit;
@@ -1493,13 +1540,13 @@ _realloc_internal (ptr, size)
1493 the thing we just freed. Unfortunately it might 1540 the thing we just freed. Unfortunately it might
1494 have been coalesced with its neighbors. */ 1541 have been coalesced with its neighbors. */
1495 if (_heapindex == block) 1542 if (_heapindex == block)
1496 (void) _malloc_internal (blocks * BLOCKSIZE); 1543 (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
1497 else 1544 else
1498 { 1545 {
1499 __ptr_t previous 1546 __ptr_t previous
1500 = _malloc_internal ((block - _heapindex) * BLOCKSIZE); 1547 = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
1501 (void) _malloc_internal (blocks * BLOCKSIZE); 1548 (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
1502 _free_internal (previous); 1549 _free_internal_nolock (previous);
1503 } 1550 }
1504 goto out; 1551 goto out;
1505 } 1552 }
@@ -1519,18 +1566,31 @@ _realloc_internal (ptr, size)
1519 { 1566 {
1520 /* The new size is different; allocate a new space, 1567 /* The new size is different; allocate a new space,
1521 and copy the lesser of the new size and the old. */ 1568 and copy the lesser of the new size and the old. */
1522 result = _malloc_internal (size); 1569 result = _malloc_internal_nolock (size);
1523 if (result == NULL) 1570 if (result == NULL)
1524 goto out; 1571 goto out;
1525 memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type)); 1572 memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
1526 _free_internal (ptr); 1573 _free_internal_nolock (ptr);
1527 } 1574 }
1528 break; 1575 break;
1529 } 1576 }
1530 1577
1531 PROTECT_MALLOC_STATE (1); 1578 PROTECT_MALLOC_STATE (1);
1532 out: 1579 out:
1580 return result;
1581}
1582
1583__ptr_t
1584_realloc_internal (ptr, size)
1585 __ptr_t ptr;
1586 __malloc_size_t size;
1587{
1588 __ptr_t result;
1589
1590 LOCK();
1591 result = _realloc_internal_nolock (ptr, size);
1533 UNLOCK (); 1592 UNLOCK ();
1593
1534 return result; 1594 return result;
1535} 1595}
1536 1596
@@ -1539,11 +1599,13 @@ realloc (ptr, size)
1539 __ptr_t ptr; 1599 __ptr_t ptr;
1540 __malloc_size_t size; 1600 __malloc_size_t size;
1541{ 1601{
1602 __ptr_t (*hook) (__ptr_t, __malloc_size_t);
1603
1542 if (!__malloc_initialized && !__malloc_initialize ()) 1604 if (!__malloc_initialized && !__malloc_initialize ())
1543 return NULL; 1605 return NULL;
1544 1606
1545 return (__realloc_hook != NULL ? *__realloc_hook : _realloc_internal) 1607 hook = __realloc_hook;
1546 (ptr, size); 1608 return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
1547} 1609}
1548/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc. 1610/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
1549 1611
@@ -1681,9 +1743,10 @@ memalign (alignment, size)
1681{ 1743{
1682 __ptr_t result; 1744 __ptr_t result;
1683 unsigned long int adj, lastadj; 1745 unsigned long int adj, lastadj;
1746 __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;
1684 1747
1685 if (__memalign_hook) 1748 if (hook)
1686 return (*__memalign_hook) (alignment, size); 1749 return (*hook) (alignment, size);
1687 1750
1688 /* Allocate a block with enough extra space to pad the block with up to 1751 /* Allocate a block with enough extra space to pad the block with up to
1689 (ALIGNMENT - 1) bytes if necessary. */ 1752 (ALIGNMENT - 1) bytes if necessary. */
@@ -1718,6 +1781,7 @@ memalign (alignment, size)
1718 of an allocated block. */ 1781 of an allocated block. */
1719 1782
1720 struct alignlist *l; 1783 struct alignlist *l;
1784 LOCK_ALIGNED_BLOCKS ();
1721 for (l = _aligned_blocks; l != NULL; l = l->next) 1785 for (l = _aligned_blocks; l != NULL; l = l->next)
1722 if (l->aligned == NULL) 1786 if (l->aligned == NULL)
1723 /* This slot is free. Use it. */ 1787 /* This slot is free. Use it. */
@@ -1725,16 +1789,23 @@ memalign (alignment, size)
1725 if (l == NULL) 1789 if (l == NULL)
1726 { 1790 {
1727 l = (struct alignlist *) malloc (sizeof (struct alignlist)); 1791 l = (struct alignlist *) malloc (sizeof (struct alignlist));
1728 if (l == NULL) 1792 if (l != NULL)
1729 { 1793 {
1730 free (result); 1794 l->next = _aligned_blocks;
1731 return NULL; 1795 _aligned_blocks = l;
1732 } 1796 }
1733 l->next = _aligned_blocks;
1734 _aligned_blocks = l;
1735 } 1797 }
1736 l->exact = result; 1798 if (l != NULL)
1737 result = l->aligned = (char *) result + alignment - adj; 1799 {
1800 l->exact = result;
1801 result = l->aligned = (char *) result + alignment - adj;
1802 }
1803 UNLOCK_ALIGNED_BLOCKS ();
1804 if (l == NULL)
1805 {
1806 free (result);
1807 result = NULL;
1808 }
1738 } 1809 }
1739 1810
1740 return result; 1811 return result;