aboutsummaryrefslogtreecommitdiffstats
path: root/gc/include
diff options
context:
space:
mode:
authorDave Love2003-06-05 18:00:24 +0000
committerDave Love2003-06-05 18:00:24 +0000
commitd0982fbddb5d9202766a24ace3313b281a0a2eff (patch)
treee1b13c9ff11ae2359d1b1dd0ca33a9b9be197fd8 /gc/include
parent460ff54e9d7a1aca9043ac267025e17b7b299595 (diff)
downloademacs-d0982fbddb5d9202766a24ace3313b281a0a2eff.tar.gz
emacs-d0982fbddb5d9202766a24ace3313b281a0a2eff.zip
Not committed to branch, sorry.
Diffstat (limited to 'gc/include')
-rw-r--r--gc/include/Makefile.am33
-rw-r--r--gc/include/Makefile.in326
-rw-r--r--gc/include/cord.h327
-rw-r--r--gc/include/ec.h70
-rw-r--r--gc/include/gc.h952
-rw-r--r--gc/include/gc_alloc.h383
-rw-r--r--gc/include/gc_allocator.h232
-rw-r--r--gc/include/gc_amiga_redirects.h30
-rw-r--r--gc/include/gc_backptr.h65
-rw-r--r--gc/include/gc_config_macros.h140
-rw-r--r--gc/include/gc_cpp.h362
-rw-r--r--gc/include/gc_gcj.h102
-rw-r--r--gc/include/gc_inl.h107
-rw-r--r--gc/include/gc_inline.h1
-rw-r--r--gc/include/gc_local_alloc.h88
-rw-r--r--gc/include/gc_mark.h147
-rw-r--r--gc/include/gc_pthread_redirects.h67
-rw-r--r--gc/include/gc_typed.h113
-rw-r--r--gc/include/javaxfc.h41
-rw-r--r--gc/include/leak_detector.h7
-rw-r--r--gc/include/new_gc_alloc.h480
-rw-r--r--gc/include/private/cord_pos.h118
-rw-r--r--gc/include/private/dbg_mlc.h176
-rw-r--r--gc/include/private/gc_hdrs.h233
-rw-r--r--gc/include/private/gc_locks.h581
-rw-r--r--gc/include/private/gc_pmark.h397
-rw-r--r--gc/include/private/gc_priv.h1914
-rw-r--r--gc/include/private/gcconfig.h2085
-rw-r--r--gc/include/private/solaris_threads.h35
-rw-r--r--gc/include/private/specific.h95
-rw-r--r--gc/include/weakpointer.h221
31 files changed, 0 insertions, 9928 deletions
diff --git a/gc/include/Makefile.am b/gc/include/Makefile.am
deleted file mode 100644
index d754edb9b83..00000000000
--- a/gc/include/Makefile.am
+++ /dev/null
@@ -1,33 +0,0 @@
1#
2#
3# THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
4# OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
5#
6# Permission is hereby granted to use or copy this program
7# for any purpose, provided the above notices are retained on all copies.
8# Permission to modify the code and to distribute modified code is granted,
9# provided the above notices are retained, and a notice that the code was
10# modified is included with the above copyright notice.
11#
12# Modified by: Grzegorz Jakacki <jakacki at acm dot org>
13
14## Process this file with automake to produce Makefile.in.
15
16# installed headers
17#
18pkginclude_HEADERS = gc.h gc_typed.h gc_inl.h \
19 gc_inline.h gc_mark.h gc_cpp.h \
20 weakpointer.h gc_alloc.h new_gc_alloc.h \
21 gc_allocator.h gc_backptr.h \
22 gc_gcj.h gc_local_alloc.h leak_detector.h \
23 gc_amiga_redirects.h gc_pthread_redirects.h \
24 gc_config_macros.h
25
26# headers which are not installed
27#
28dist_noinst_HEADERS = private/gc_hdrs.h \
29 private/gc_priv.h private/gcconfig.h \
30 private/gc_pmark.h private/gc_locks.h \
31 private/solaris_threads.h private/dbg_mlc.h \
32 private/specific.h private/cord_pos.h \
33 cord.h ec.h javaxfc.h
diff --git a/gc/include/Makefile.in b/gc/include/Makefile.in
deleted file mode 100644
index a8dab022a04..00000000000
--- a/gc/include/Makefile.in
+++ /dev/null
@@ -1,326 +0,0 @@
1# Makefile.in generated by automake 1.6.3 from Makefile.am.
2# @configure_input@
3
4# Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002
5# Free Software Foundation, Inc.
6# This Makefile.in is free software; the Free Software Foundation
7# gives unlimited permission to copy and/or distribute it,
8# with or without modifications, as long as this notice is preserved.
9
10# This program is distributed in the hope that it will be useful,
11# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
12# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
13# PARTICULAR PURPOSE.
14
15@SET_MAKE@
16
17#
18#
19# THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
20# OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
21#
22# Permission is hereby granted to use or copy this program
23# for any purpose, provided the above notices are retained on all copies.
24# Permission to modify the code and to distribute modified code is granted,
25# provided the above notices are retained, and a notice that the code was
26# modified is included with the above copyright notice.
27#
28# Modified by: Grzegorz Jakacki <jakacki at acm dot org>
29SHELL = @SHELL@
30
31srcdir = @srcdir@
32top_srcdir = @top_srcdir@
33VPATH = @srcdir@
34prefix = @prefix@
35exec_prefix = @exec_prefix@
36
37bindir = @bindir@
38sbindir = @sbindir@
39libexecdir = @libexecdir@
40datadir = @datadir@
41sysconfdir = @sysconfdir@
42sharedstatedir = @sharedstatedir@
43localstatedir = @localstatedir@
44libdir = @libdir@
45infodir = @infodir@
46mandir = @mandir@
47includedir = @includedir@
48oldincludedir = /usr/include
49pkgdatadir = $(datadir)/@PACKAGE@
50pkglibdir = $(libdir)/@PACKAGE@
51pkgincludedir = $(includedir)/@PACKAGE@
52top_builddir = ..
53
54ACLOCAL = @ACLOCAL@
55AUTOCONF = @AUTOCONF@
56AUTOMAKE = @AUTOMAKE@
57AUTOHEADER = @AUTOHEADER@
58
59am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
60INSTALL = @INSTALL@
61INSTALL_PROGRAM = @INSTALL_PROGRAM@
62INSTALL_DATA = @INSTALL_DATA@
63install_sh_DATA = $(install_sh) -c -m 644
64install_sh_PROGRAM = $(install_sh) -c
65install_sh_SCRIPT = $(install_sh) -c
66INSTALL_SCRIPT = @INSTALL_SCRIPT@
67INSTALL_HEADER = $(INSTALL_DATA)
68transform = @program_transform_name@
69NORMAL_INSTALL = :
70PRE_INSTALL = :
71POST_INSTALL = :
72NORMAL_UNINSTALL = :
73PRE_UNINSTALL = :
74POST_UNINSTALL = :
75host_alias = @host_alias@
76host_triplet = @host@
77
78EXEEXT = @EXEEXT@
79OBJEXT = @OBJEXT@
80PATH_SEPARATOR = @PATH_SEPARATOR@
81AMTAR = @AMTAR@
82AR = @AR@
83AS = @AS@
84AWK = @AWK@
85CC = @CC@
86CCAS = @CCAS@
87CCASFLAGS = @CCASFLAGS@
88CFLAGS = @CFLAGS@
89CXX = @CXX@
90CXXFLAGS = @CXXFLAGS@
91CXXINCLUDES = @CXXINCLUDES@
92DEPDIR = @DEPDIR@
93DLLTOOL = @DLLTOOL@
94ECHO = @ECHO@
95EXTRA_TEST_LIBS = @EXTRA_TEST_LIBS@
96GC_CFLAGS = @GC_CFLAGS@
97GC_VERSION = @GC_VERSION@
98INCLUDES = @INCLUDES@
99INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
100LIBTOOL = @LIBTOOL@
101LN_S = @LN_S@
102MAINT = @MAINT@
103MY_CFLAGS = @MY_CFLAGS@
104OBJDUMP = @OBJDUMP@
105PACKAGE = @PACKAGE@
106RANLIB = @RANLIB@
107STRIP = @STRIP@
108THREADLIBS = @THREADLIBS@
109VERSION = @VERSION@
110addincludes = @addincludes@
111addobjs = @addobjs@
112addtests = @addtests@
113am__include = @am__include@
114am__quote = @am__quote@
115install_sh = @install_sh@
116target_all = @target_all@
117
118# installed headers
119#
120pkginclude_HEADERS = gc.h gc_typed.h gc_inl.h \
121 gc_inline.h gc_mark.h gc_cpp.h \
122 weakpointer.h gc_alloc.h new_gc_alloc.h \
123 gc_allocator.h gc_backptr.h \
124 gc_gcj.h gc_local_alloc.h leak_detector.h \
125 gc_amiga_redirects.h gc_pthread_redirects.h \
126 gc_config_macros.h
127
128
129# headers which are not installed
130#
131dist_noinst_HEADERS = private/gc_hdrs.h \
132 private/gc_priv.h private/gcconfig.h \
133 private/gc_pmark.h private/gc_locks.h \
134 private/solaris_threads.h private/dbg_mlc.h \
135 private/specific.h private/cord_pos.h \
136 cord.h ec.h javaxfc.h
137
138subdir = include
139mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs
140CONFIG_CLEAN_FILES =
141DIST_SOURCES =
142HEADERS = $(dist_noinst_HEADERS) $(pkginclude_HEADERS)
143
144DIST_COMMON = $(dist_noinst_HEADERS) $(pkginclude_HEADERS) Makefile.am \
145 Makefile.in
146all: all-am
147
148.SUFFIXES:
149$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ Makefile.am $(top_srcdir)/configure.in $(ACLOCAL_M4)
150 cd $(top_srcdir) && \
151 $(AUTOMAKE) --gnu include/Makefile
152Makefile: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.in $(top_builddir)/config.status
153 cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)
154
155mostlyclean-libtool:
156 -rm -f *.lo
157
158clean-libtool:
159 -rm -rf .libs _libs
160
161distclean-libtool:
162 -rm -f libtool
163uninstall-info-am:
164pkgincludeHEADERS_INSTALL = $(INSTALL_HEADER)
165install-pkgincludeHEADERS: $(pkginclude_HEADERS)
166 @$(NORMAL_INSTALL)
167 $(mkinstalldirs) $(DESTDIR)$(pkgincludedir)
168 @list='$(pkginclude_HEADERS)'; for p in $$list; do \
169 if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
170 f="`echo $$p | sed -e 's|^.*/||'`"; \
171 echo " $(pkgincludeHEADERS_INSTALL) $$d$$p $(DESTDIR)$(pkgincludedir)/$$f"; \
172 $(pkgincludeHEADERS_INSTALL) $$d$$p $(DESTDIR)$(pkgincludedir)/$$f; \
173 done
174
175uninstall-pkgincludeHEADERS:
176 @$(NORMAL_UNINSTALL)
177 @list='$(pkginclude_HEADERS)'; for p in $$list; do \
178 f="`echo $$p | sed -e 's|^.*/||'`"; \
179 echo " rm -f $(DESTDIR)$(pkgincludedir)/$$f"; \
180 rm -f $(DESTDIR)$(pkgincludedir)/$$f; \
181 done
182
183ETAGS = etags
184ETAGSFLAGS =
185
186tags: TAGS
187
188ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
189 list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
190 unique=`for i in $$list; do \
191 if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
192 done | \
193 $(AWK) ' { files[$$0] = 1; } \
194 END { for (i in files) print i; }'`; \
195 mkid -fID $$unique
196
197TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
198 $(TAGS_FILES) $(LISP)
199 tags=; \
200 here=`pwd`; \
201 list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
202 unique=`for i in $$list; do \
203 if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
204 done | \
205 $(AWK) ' { files[$$0] = 1; } \
206 END { for (i in files) print i; }'`; \
207 test -z "$(ETAGS_ARGS)$$tags$$unique" \
208 || $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
209 $$tags $$unique
210
211GTAGS:
212 here=`$(am__cd) $(top_builddir) && pwd` \
213 && cd $(top_srcdir) \
214 && gtags -i $(GTAGS_ARGS) $$here
215
216distclean-tags:
217 -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH
218DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
219
220top_distdir = ..
221distdir = $(top_distdir)/$(PACKAGE)-$(VERSION)
222
223distdir: $(DISTFILES)
224 $(mkinstalldirs) $(distdir)/private
225 @list='$(DISTFILES)'; for file in $$list; do \
226 if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
227 dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
228 if test "$$dir" != "$$file" && test "$$dir" != "."; then \
229 dir="/$$dir"; \
230 $(mkinstalldirs) "$(distdir)$$dir"; \
231 else \
232 dir=''; \
233 fi; \
234 if test -d $$d/$$file; then \
235 if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
236 cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
237 fi; \
238 cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
239 else \
240 test -f $(distdir)/$$file \
241 || cp -p $$d/$$file $(distdir)/$$file \
242 || exit 1; \
243 fi; \
244 done
245check-am: all-am
246check: check-am
247all-am: Makefile $(HEADERS)
248
249installdirs:
250 $(mkinstalldirs) $(DESTDIR)$(pkgincludedir)
251
252install: install-am
253install-exec: install-exec-am
254install-data: install-data-am
255uninstall: uninstall-am
256
257install-am: all-am
258 @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
259
260installcheck: installcheck-am
261install-strip:
262 $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
263 INSTALL_STRIP_FLAG=-s \
264 `test -z '$(STRIP)' || \
265 echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
266mostlyclean-generic:
267
268clean-generic:
269
270distclean-generic:
271 -rm -f Makefile $(CONFIG_CLEAN_FILES)
272
273maintainer-clean-generic:
274 @echo "This command is intended for maintainers to use"
275 @echo "it deletes files that may require special tools to rebuild."
276clean: clean-am
277
278clean-am: clean-generic clean-libtool mostlyclean-am
279
280distclean: distclean-am
281
282distclean-am: clean-am distclean-generic distclean-libtool \
283 distclean-tags
284
285dvi: dvi-am
286
287dvi-am:
288
289info: info-am
290
291info-am:
292
293install-data-am: install-pkgincludeHEADERS
294
295install-exec-am:
296
297install-info: install-info-am
298
299install-man:
300
301installcheck-am:
302
303maintainer-clean: maintainer-clean-am
304
305maintainer-clean-am: distclean-am maintainer-clean-generic
306
307mostlyclean: mostlyclean-am
308
309mostlyclean-am: mostlyclean-generic mostlyclean-libtool
310
311uninstall-am: uninstall-info-am uninstall-pkgincludeHEADERS
312
313.PHONY: GTAGS all all-am check check-am clean clean-generic \
314 clean-libtool distclean distclean-generic distclean-libtool \
315 distclean-tags distdir dvi dvi-am info info-am install \
316 install-am install-data install-data-am install-exec \
317 install-exec-am install-info install-info-am install-man \
318 install-pkgincludeHEADERS install-strip installcheck \
319 installcheck-am installdirs maintainer-clean \
320 maintainer-clean-generic mostlyclean mostlyclean-generic \
321 mostlyclean-libtool tags uninstall uninstall-am \
322 uninstall-info-am uninstall-pkgincludeHEADERS
323
324# Tell versions [3.59,3.63) of GNU make to not export all variables.
325# Otherwise a system limit (for SysV at least) may be exceeded.
326.NOEXPORT:
diff --git a/gc/include/cord.h b/gc/include/cord.h
deleted file mode 100644
index 926089e86fb..00000000000
--- a/gc/include/cord.h
+++ /dev/null
@@ -1,327 +0,0 @@
1/*
2 * Copyright (c) 1993-1994 by Xerox Corporation. All rights reserved.
3 *
4 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
5 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
6 *
7 * Permission is hereby granted to use or copy this program
8 * for any purpose, provided the above notices are retained on all copies.
9 * Permission to modify the code and to distribute modified code is granted,
10 * provided the above notices are retained, and a notice that the code was
11 * modified is included with the above copyright notice.
12 *
13 * Author: Hans-J. Boehm (boehm@parc.xerox.com)
14 */
15/* Boehm, October 5, 1995 4:20 pm PDT */
16
17/*
18 * Cords are immutable character strings. A number of operations
19 * on long cords are much more efficient than their strings.h counterpart.
20 * In particular, concatenation takes constant time independent of the length
21 * of the arguments. (Cords are represented as trees, with internal
22 * nodes representing concatenation and leaves consisting of either C
23 * strings or a functional description of the string.)
24 *
25 * The following are reasonable applications of cords. They would perform
26 * unacceptably if C strings were used:
27 * - A compiler that produces assembly language output by repeatedly
28 * concatenating instructions onto a cord representing the output file.
29 * - A text editor that converts the input file to a cord, and then
30 * performs editing operations by producing a new cord representing
31 * the file after echa character change (and keeping the old ones in an
32 * edit history)
33 *
34 * For optimal performance, cords should be built by
35 * concatenating short sections.
36 * This interface is designed for maximum compatibility with C strings.
37 * ASCII NUL characters may be embedded in cords using CORD_from_fn.
38 * This is handled correctly, but CORD_to_char_star will produce a string
39 * with embedded NULs when given such a cord.
40 *
41 * This interface is fairly big, largely for performance reasons.
42 * The most basic constants and functions:
43 *
44 * CORD - the type of a cord;
45 * CORD_EMPTY - empty cord;
46 * CORD_len(cord) - length of a cord;
47 * CORD_cat(cord1,cord2) - concatenation of two cords;
48 * CORD_substr(cord, start, len) - substring (or subcord);
49 * CORD_pos i; CORD_FOR(i, cord) { ... CORD_pos_fetch(i) ... } -
50 * examine each character in a cord. CORD_pos_fetch(i) is the char.
51 * CORD_fetch(int i) - Retrieve i'th character (slowly).
52 * CORD_cmp(cord1, cord2) - compare two cords.
53 * CORD_from_file(FILE * f) - turn a read-only file into a cord.
54 * CORD_to_char_star(cord) - convert to C string.
55 * (Non-NULL C constant strings are cords.)
56 * CORD_printf (etc.) - cord version of printf. Use %r for cords.
57 */
58# ifndef CORD_H
59
60# define CORD_H
61# include <stddef.h>
62# include <stdio.h>
63/* Cords have type const char *. This is cheating quite a bit, and not */
64/* 100% portable. But it means that nonempty character string */
65/* constants may be used as cords directly, provided the string is */
66/* never modified in place. The empty cord is represented by, and */
67/* can be written as, 0. */
68
69typedef const char * CORD;
70
71/* An empty cord is always represented as nil */
72# define CORD_EMPTY 0
73
74/* Is a nonempty cord represented as a C string? */
75#define CORD_IS_STRING(s) (*(s) != '\0')
76
77/* Concatenate two cords. If the arguments are C strings, they may */
78/* not be subsequently altered. */
79CORD CORD_cat(CORD x, CORD y);
80
81/* Concatenate a cord and a C string with known length. Except for the */
82/* empty string case, this is a special case of CORD_cat. Since the */
83/* length is known, it can be faster. */
84/* The string y is shared with the resulting CORD. Hence it should */
85/* not be altered by the caller. */
86CORD CORD_cat_char_star(CORD x, const char * y, size_t leny);
87
88/* Compute the length of a cord */
89size_t CORD_len(CORD x);
90
91/* Cords may be represented by functions defining the ith character */
92typedef char (* CORD_fn)(size_t i, void * client_data);
93
94/* Turn a functional description into a cord. */
95CORD CORD_from_fn(CORD_fn fn, void * client_data, size_t len);
96
97/* Return the substring (subcord really) of x with length at most n, */
98/* starting at position i. (The initial character has position 0.) */
99CORD CORD_substr(CORD x, size_t i, size_t n);
100
101/* Return the argument, but rebalanced to allow more efficient */
102/* character retrieval, substring operations, and comparisons. */
103/* This is useful only for cords that were built using repeated */
104/* concatenation. Guarantees log time access to the result, unless */
105/* x was obtained through a large number of repeated substring ops */
106/* or the embedded functional descriptions take longer to evaluate. */
107/* May reallocate significant parts of the cord. The argument is not */
108/* modified; only the result is balanced. */
109CORD CORD_balance(CORD x);
110
111/* The following traverse a cord by applying a function to each */
112/* character. This is occasionally appropriate, especially where */
113/* speed is crucial. But, since C doesn't have nested functions, */
114/* clients of this sort of traversal are clumsy to write. Consider */
115/* the functions that operate on cord positions instead. */
116
117/* Function to iteratively apply to individual characters in cord. */
118typedef int (* CORD_iter_fn)(char c, void * client_data);
119
120/* Function to apply to substrings of a cord. Each substring is a */
121/* a C character string, not a general cord. */
122typedef int (* CORD_batched_iter_fn)(const char * s, void * client_data);
123# define CORD_NO_FN ((CORD_batched_iter_fn)0)
124
125/* Apply f1 to each character in the cord, in ascending order, */
126/* starting at position i. If */
127/* f2 is not CORD_NO_FN, then multiple calls to f1 may be replaced by */
128/* a single call to f2. The parameter f2 is provided only to allow */
129/* some optimization by the client. This terminates when the right */
130/* end of this string is reached, or when f1 or f2 return != 0. In the */
131/* latter case CORD_iter returns != 0. Otherwise it returns 0. */
132/* The specified value of i must be < CORD_len(x). */
133int CORD_iter5(CORD x, size_t i, CORD_iter_fn f1,
134 CORD_batched_iter_fn f2, void * client_data);
135
136/* A simpler version that starts at 0, and without f2: */
137int CORD_iter(CORD x, CORD_iter_fn f1, void * client_data);
138# define CORD_iter(x, f1, cd) CORD_iter5(x, 0, f1, CORD_NO_FN, cd)
139
140/* Similar to CORD_iter5, but end-to-beginning. No provisions for */
141/* CORD_batched_iter_fn. */
142int CORD_riter4(CORD x, size_t i, CORD_iter_fn f1, void * client_data);
143
144/* A simpler version that starts at the end: */
145int CORD_riter(CORD x, CORD_iter_fn f1, void * client_data);
146
147/* Functions that operate on cord positions. The easy way to traverse */
148/* cords. A cord position is logically a pair consisting of a cord */
149/* and an index into that cord. But it is much faster to retrieve a */
150/* charcter based on a position than on an index. Unfortunately, */
151/* positions are big (order of a few 100 bytes), so allocate them with */
152/* caution. */
153/* Things in cord_pos.h should be treated as opaque, except as */
154/* described below. Also note that */
155/* CORD_pos_fetch, CORD_next and CORD_prev have both macro and function */
156/* definitions. The former may evaluate their argument more than once. */
157# include "private/cord_pos.h"
158
159/*
160 Visible definitions from above:
161
162 typedef <OPAQUE but fairly big> CORD_pos[1];
163
164 * Extract the cord from a position:
165 CORD CORD_pos_to_cord(CORD_pos p);
166
167 * Extract the current index from a position:
168 size_t CORD_pos_to_index(CORD_pos p);
169
170 * Fetch the character located at the given position:
171 char CORD_pos_fetch(CORD_pos p);
172
173 * Initialize the position to refer to the given cord and index.
174 * Note that this is the most expensive function on positions:
175 void CORD_set_pos(CORD_pos p, CORD x, size_t i);
176
177 * Advance the position to the next character.
178 * P must be initialized and valid.
179 * Invalidates p if past end:
180 void CORD_next(CORD_pos p);
181
182 * Move the position to the preceding character.
183 * P must be initialized and valid.
184 * Invalidates p if past beginning:
185 void CORD_prev(CORD_pos p);
186
187 * Is the position valid, i.e. inside the cord?
188 int CORD_pos_valid(CORD_pos p);
189*/
190# define CORD_FOR(pos, cord) \
191 for (CORD_set_pos(pos, cord, 0); CORD_pos_valid(pos); CORD_next(pos))
192
193
194/* An out of memory handler to call. May be supplied by client. */
195/* Must not return. */
196extern void (* CORD_oom_fn)(void);
197
198/* Dump the representation of x to stdout in an implementation defined */
199/* manner. Intended for debugging only. */
200void CORD_dump(CORD x);
201
202/* The following could easily be implemented by the client. They are */
203/* provided in cordxtra.c for convenience. */
204
205/* Concatenate a character to the end of a cord. */
206CORD CORD_cat_char(CORD x, char c);
207
208/* Concatenate n cords. */
209CORD CORD_catn(int n, /* CORD */ ...);
210
211/* Return the character in CORD_substr(x, i, 1) */
212char CORD_fetch(CORD x, size_t i);
213
214/* Return < 0, 0, or > 0, depending on whether x < y, x = y, x > y */
215int CORD_cmp(CORD x, CORD y);
216
217/* A generalization that takes both starting positions for the */
218/* comparison, and a limit on the number of characters to be compared. */
219int CORD_ncmp(CORD x, size_t x_start, CORD y, size_t y_start, size_t len);
220
221/* Find the first occurrence of s in x at position start or later. */
222/* Return the position of the first character of s in x, or */
223/* CORD_NOT_FOUND if there is none. */
224size_t CORD_str(CORD x, size_t start, CORD s);
225
226/* Return a cord consisting of i copies of (possibly NUL) c. Dangerous */
227/* in conjunction with CORD_to_char_star. */
228/* The resulting representation takes constant space, independent of i. */
229CORD CORD_chars(char c, size_t i);
230# define CORD_nul(i) CORD_chars('\0', (i))
231
232/* Turn a file into cord. The file must be seekable. Its contents */
233/* must remain constant. The file may be accessed as an immediate */
234/* result of this call and/or as a result of subsequent accesses to */
235/* the cord. Short files are likely to be immediately read, but */
236/* long files are likely to be read on demand, possibly relying on */
237/* stdio for buffering. */
238/* We must have exclusive access to the descriptor f, i.e. we may */
239/* read it at any time, and expect the file pointer to be */
240/* where we left it. Normally this should be invoked as */
241/* CORD_from_file(fopen(...)) */
242/* CORD_from_file arranges to close the file descriptor when it is no */
243/* longer needed (e.g. when the result becomes inaccessible). */
244/* The file f must be such that ftell reflects the actual character */
245/* position in the file, i.e. the number of characters that can be */
246/* or were read with fread. On UNIX systems this is always true. On */
247/* MS Windows systems, f must be opened in binary mode. */
248CORD CORD_from_file(FILE * f);
249
250/* Equivalent to the above, except that the entire file will be read */
251/* and the file pointer will be closed immediately. */
252/* The binary mode restriction from above does not apply. */
253CORD CORD_from_file_eager(FILE * f);
254
255/* Equivalent to the above, except that the file will be read on demand.*/
256/* The binary mode restriction applies. */
257CORD CORD_from_file_lazy(FILE * f);
258
259/* Turn a cord into a C string. The result shares no structure with */
260/* x, and is thus modifiable. */
261char * CORD_to_char_star(CORD x);
262
263/* Turn a C string into a CORD. The C string is copied, and so may */
264/* subsequently be modified. */
265CORD CORD_from_char_star(const char *s);
266
267/* Identical to the above, but the result may share structure with */
268/* the argument and is thus not modifiable. */
269const char * CORD_to_const_char_star(CORD x);
270
271/* Write a cord to a file, starting at the current position. No */
272/* trailing NULs are newlines are added. */
273/* Returns EOF if a write error occurs, 1 otherwise. */
274int CORD_put(CORD x, FILE * f);
275
276/* "Not found" result for the following two functions. */
277# define CORD_NOT_FOUND ((size_t)(-1))
278
279/* A vague analog of strchr. Returns the position (an integer, not */
280/* a pointer) of the first occurrence of (char) c inside x at position */
281/* i or later. The value i must be < CORD_len(x). */
282size_t CORD_chr(CORD x, size_t i, int c);
283
284/* A vague analog of strrchr. Returns index of the last occurrence */
285/* of (char) c inside x at position i or earlier. The value i */
286/* must be < CORD_len(x). */
287size_t CORD_rchr(CORD x, size_t i, int c);
288
289
290/* The following are also not primitive, but are implemented in */
291/* cordprnt.c. They provide functionality similar to the ANSI C */
292/* functions with corresponding names, but with the following */
293/* additions and changes: */
294/* 1. A %r conversion specification specifies a CORD argument. Field */
295/* width, precision, etc. have the same semantics as for %s. */
296/* (Note that %c,%C, and %S were already taken.) */
297/* 2. The format string is represented as a CORD. */
298/* 3. CORD_sprintf and CORD_vsprintf assign the result through the 1st */ /* argument. Unlike their ANSI C versions, there is no need to guess */
299/* the correct buffer size. */
300/* 4. Most of the conversions are implement through the native */
301/* vsprintf. Hence they are usually no faster, and */
302/* idiosyncracies of the native printf are preserved. However, */
303/* CORD arguments to CORD_sprintf and CORD_vsprintf are NOT copied; */
304/* the result shares the original structure. This may make them */
305/* very efficient in some unusual applications. */
306/* The format string is copied. */
307/* All functions return the number of characters generated or -1 on */
308/* error. This complies with the ANSI standard, but is inconsistent */
309/* with some older implementations of sprintf. */
310
311/* The implementation of these is probably less portable than the rest */
312/* of this package. */
313
314#ifndef CORD_NO_IO
315
316#include <stdarg.h>
317
318int CORD_sprintf(CORD * out, CORD format, ...);
319int CORD_vsprintf(CORD * out, CORD format, va_list args);
320int CORD_fprintf(FILE * f, CORD format, ...);
321int CORD_vfprintf(FILE * f, CORD format, va_list args);
322int CORD_printf(CORD format, ...);
323int CORD_vprintf(CORD format, va_list args);
324
325#endif /* CORD_NO_IO */
326
327# endif /* CORD_H */
diff --git a/gc/include/ec.h b/gc/include/ec.h
deleted file mode 100644
index c829b83ad11..00000000000
--- a/gc/include/ec.h
+++ /dev/null
@@ -1,70 +0,0 @@
1# ifndef EC_H
2# define EC_H
3
4# ifndef CORD_H
5# include "cord.h"
6# endif
7
8/* Extensible cords are strings that may be destructively appended to. */
9/* They allow fast construction of cords from characters that are */
10/* being read from a stream. */
11/*
12 * A client might look like:
13 *
14 * {
15 * CORD_ec x;
16 * CORD result;
17 * char c;
18 * FILE *f;
19 *
20 * ...
21 * CORD_ec_init(x);
22 * while(...) {
23 * c = getc(f);
24 * ...
25 * CORD_ec_append(x, c);
26 * }
27 * result = CORD_balance(CORD_ec_to_cord(x));
28 *
29 * If a C string is desired as the final result, the call to CORD_balance
30 * may be replaced by a call to CORD_to_char_star.
31 */
32
33# ifndef CORD_BUFSZ
34# define CORD_BUFSZ 128
35# endif
36
37typedef struct CORD_ec_struct {
38 CORD ec_cord;
39 char * ec_bufptr;
40 char ec_buf[CORD_BUFSZ+1];
41} CORD_ec[1];
42
43/* This structure represents the concatenation of ec_cord with */
44/* ec_buf[0 ... (ec_bufptr-ec_buf-1)] */
45
46/* Flush the buffer part of the extended chord into ec_cord. */
47/* Note that this is almost the only real function, and it is */
48/* implemented in 6 lines in cordxtra.c */
49void CORD_ec_flush_buf(CORD_ec x);
50
51/* Convert an extensible cord to a cord. */
52# define CORD_ec_to_cord(x) (CORD_ec_flush_buf(x), (x)[0].ec_cord)
53
54/* Initialize an extensible cord. */
55# define CORD_ec_init(x) ((x)[0].ec_cord = 0, (x)[0].ec_bufptr = (x)[0].ec_buf)
56
57/* Append a character to an extensible cord. */
58# define CORD_ec_append(x, c) \
59 { \
60 if ((x)[0].ec_bufptr == (x)[0].ec_buf + CORD_BUFSZ) { \
61 CORD_ec_flush_buf(x); \
62 } \
63 *((x)[0].ec_bufptr)++ = (c); \
64 }
65
66/* Append a cord to an extensible cord. Structure remains shared with */
67/* original. */
68void CORD_ec_append_cord(CORD_ec x, CORD s);
69
70# endif /* EC_H */
diff --git a/gc/include/gc.h b/gc/include/gc.h
deleted file mode 100644
index b1c64ced085..00000000000
--- a/gc/include/gc.h
+++ /dev/null
@@ -1,952 +0,0 @@
1/*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
4 * Copyright 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright 1999 by Hewlett-Packard Company. All rights reserved.
6 *
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 *
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
15 */
16
17/*
18 * Note that this defines a large number of tuning hooks, which can
19 * safely be ignored in nearly all cases. For normal use it suffices
20 * to call only GC_MALLOC and perhaps GC_REALLOC.
21 * For better performance, also look at GC_MALLOC_ATOMIC, and
22 * GC_enable_incremental. If you need an action to be performed
23 * immediately before an object is collected, look at GC_register_finalizer.
24 * If you are using Solaris threads, look at the end of this file.
25 * Everything else is best ignored unless you encounter performance
26 * problems.
27 */
28
29#ifndef _GC_H
30
31# define _GC_H
32
33# include "gc_config_macros.h"
34
35# if defined(__STDC__) || defined(__cplusplus)
36# define GC_PROTO(args) args
37 typedef void * GC_PTR;
38# define GC_CONST const
39# else
40# define GC_PROTO(args) ()
41 typedef char * GC_PTR;
42# define GC_CONST
43# endif
44
45# ifdef __cplusplus
46 extern "C" {
47# endif
48
49
50/* Define word and signed_word to be unsigned and signed types of the */
51/* size as char * or void *. There seems to be no way to do this */
52/* even semi-portably. The following is probably no better/worse */
53/* than almost anything else. */
54/* The ANSI standard suggests that size_t and ptrdiff_t might be */
55/* better choices. But those appear to have incorrect definitions */
56/* on many systems. Notably "typedef int size_t" seems to be both */
57/* frequent and WRONG. */
58typedef unsigned long GC_word;
59typedef long GC_signed_word;
60
61/* Public read-only variables */
62
63GC_API GC_word GC_gc_no;/* Counter incremented per collection. */
64 /* Includes empty GCs at startup. */
65
66GC_API int GC_parallel; /* GC is parallelized for performance on */
67 /* multiprocessors. Currently set only */
68 /* implicitly if collector is built with */
69 /* -DPARALLEL_MARK and if either: */
70 /* Env variable GC_NPROC is set to > 1, or */
71 /* GC_NPROC is not set and this is an MP. */
72 /* If GC_parallel is set, incremental */
73 /* collection is only partially functional, */
74 /* and may not be desirable. */
75
76
77/* Public R/W variables */
78
79GC_API GC_PTR (*GC_oom_fn) GC_PROTO((size_t bytes_requested));
80 /* When there is insufficient memory to satisfy */
81 /* an allocation request, we return */
82 /* (*GC_oom_fn)(). By default this just */
83 /* returns 0. */
84 /* If it returns, it must return 0 or a valid */
85 /* pointer to a previously allocated heap */
86 /* object. */
87
88GC_API int GC_find_leak;
89 /* Do not actually garbage collect, but simply */
90 /* report inaccessible memory that was not */
91 /* deallocated with GC_free. Initial value */
92 /* is determined by FIND_LEAK macro. */
93
94GC_API int GC_all_interior_pointers;
95 /* Arrange for pointers to object interiors to */
96 /* be recognized as valid. May not be changed */
97 /* after GC initialization. */
98 /* Initial value is determined by */
99 /* -DALL_INTERIOR_POINTERS. */
100 /* Unless DONT_ADD_BYTE_AT_END is defined, this */
101 /* also affects whether sizes are increased by */
102 /* at least a byte to allow "off the end" */
103 /* pointer recognition. */
104 /* MUST BE 0 or 1. */
105
106GC_API int GC_quiet; /* Disable statistics output. Only matters if */
107 /* collector has been compiled with statistics */
108 /* enabled. This involves a performance cost, */
109 /* and is thus not the default. */
110
111GC_API int GC_finalize_on_demand;
112 /* If nonzero, finalizers will only be run in */
113 /* response to an explicit GC_invoke_finalizers */
114 /* call. The default is determined by whether */
115 /* the FINALIZE_ON_DEMAND macro is defined */
116 /* when the collector is built. */
117
118GC_API int GC_java_finalization;
119 /* Mark objects reachable from finalizable */
120 /* objects in a separate postpass. This makes */
121 /* it a bit safer to use non-topologically- */
122 /* ordered finalization. Default value is */
123 /* determined by JAVA_FINALIZATION macro. */
124
125GC_API void (* GC_finalizer_notifier)();
126 /* Invoked by the collector when there are */
127 /* objects to be finalized. Invoked at most */
128 /* once per GC cycle. Never invoked unless */
129 /* GC_finalize_on_demand is set. */
130 /* Typically this will notify a finalization */
131 /* thread, which will call GC_invoke_finalizers */
132 /* in response. */
133
134GC_API int GC_dont_gc;	/* != 0 ==> Dont collect. In versions 7.2a1+, */
135 /* this overrides explicit GC_gcollect() calls. */
136 /* Used as a counter, so that nested enabling */
137 /* and disabling work correctly. Should */
138 /* normally be updated with GC_enable() and */
139 /* GC_disable() calls. */
140 /* Direct assignment to GC_dont_gc is */
141 /* deprecated. */
142
143GC_API int GC_dont_expand;
144			/* Don't expand heap unless explicitly requested */
145 /* or forced to. */
146
147GC_API int GC_use_entire_heap;
148 /* Causes the nonincremental collector to use the */
149 /* entire heap before collecting. This was the only */
150 /* option for GC versions < 5.0. This sometimes */
151 /* results in more large block fragmentation, since */
152 /* very large blocks will tend to get broken up */
153 /* during each GC cycle. It is likely to result in a */
154 /* larger working set, but lower collection */
155 /* frequencies, and hence fewer instructions executed */
156 /* in the collector. */
157
158GC_API int GC_full_freq; /* Number of partial collections between */
159 /* full collections. Matters only if */
160 /* GC_incremental is set. */
161 /* Full collections are also triggered if */
162 /* the collector detects a substantial */
163 /* increase in the number of in-use heap */
164 /* blocks. Values in the tens are now */
165 /* perfectly reasonable, unlike for */
166 /* earlier GC versions. */
167
168GC_API GC_word GC_non_gc_bytes;
169 /* Bytes not considered candidates for collection. */
170 /* Used only to control scheduling of collections. */
171 /* Updated by GC_malloc_uncollectable and GC_free. */
172 /* Wizards only. */
173
174GC_API int GC_no_dls;
175 /* Don't register dynamic library data segments. */
176 /* Wizards only. Should be used only if the */
177 /* application explicitly registers all roots. */
178 /* In Microsoft Windows environments, this will */
179 /* usually also prevent registration of the */
180 /* main data segment as part of the root set. */
181
182GC_API GC_word GC_free_space_divisor;
183 /* We try to make sure that we allocate at */
184 /* least N/GC_free_space_divisor bytes between */
185 /* collections, where N is the heap size plus */
186 /* a rough estimate of the root set size. */
187 /* Initially, GC_free_space_divisor = 4. */
188 /* Increasing its value will use less space */
189 /* but more collection time. Decreasing it */
190 /* will appreciably decrease collection time */
191 /* at the expense of space. */
192 /* GC_free_space_divisor = 1 will effectively */
193 /* disable collections. */
194
195GC_API GC_word GC_max_retries;
196 /* The maximum number of GCs attempted before */
197 /* reporting out of memory after heap */
198 /* expansion fails. Initially 0. */
199
200
201GC_API char *GC_stackbottom; /* Cool end of user stack. */
202 /* May be set in the client prior to */
203 /* calling any GC_ routines. This */
204 /* avoids some overhead, and */
205 /* potentially some signals that can */
206 /* confuse debuggers. Otherwise the */
207 /* collector attempts to set it */
208 /* automatically. */
209 /* For multithreaded code, this is the */
210 /* cold end of the stack for the */
211 /* primordial thread. */
212
213GC_API int GC_dont_precollect; /* Don't collect as part of */
214 /* initialization. Should be set only */
215 /* if the client wants a chance to */
216 /* manually initialize the root set */
217 /* before the first collection. */
218 /* Interferes with blacklisting. */
219 /* Wizards only. */
220
221GC_API unsigned long GC_time_limit;
222 /* If incremental collection is enabled, */
223 /* We try to terminate collections */
224 /* after this many milliseconds. Not a */
225 /* hard time bound. Setting this to */
226 /* GC_TIME_UNLIMITED will essentially */
227 /* disable incremental collection while */
228 /* leaving generational collection */
229 /* enabled. */
230# define GC_TIME_UNLIMITED 999999
231 /* Setting GC_time_limit to this value */
232 /* will disable the "pause time exceeded"*/
233 /* tests. */
234
235/* Public procedures */
236
237/* Initialize the collector. This is only required when using thread-local
238 * allocation, since unlike the regular allocation routines, GC_local_malloc
239 * is not self-initializing. If you use GC_local_malloc you should arrange
240 * to call this somehow (e.g. from a constructor) before doing any allocation.
241 */
242GC_API void GC_init GC_PROTO((void));
243
244/*
245 * general purpose allocation routines, with roughly malloc calling conv.
246 * The atomic versions promise that no relevant pointers are contained
247 * in the object. The nonatomic versions guarantee that the new object
248 * is cleared. GC_malloc_stubborn promises that no changes to the object
249 * will occur after GC_end_stubborn_change has been called on the
250 * result of GC_malloc_stubborn. GC_malloc_uncollectable allocates an object
251 * that is scanned for pointers to collectable objects, but is not itself
252 * collectable. The object is scanned even if it does not appear to
253 * be reachable. GC_malloc_uncollectable and GC_free called on the resulting
254 * object implicitly update GC_non_gc_bytes appropriately.
255 *
256 * Note that the GC_malloc_stubborn support is stubbed out by default
257 * starting in 6.0. GC_malloc_stubborn is an alias for GC_malloc unless
258 * the collector is built with STUBBORN_ALLOC defined.
259 */
260GC_API GC_PTR GC_malloc GC_PROTO((size_t size_in_bytes));
261GC_API GC_PTR GC_malloc_atomic GC_PROTO((size_t size_in_bytes));
262GC_API GC_PTR GC_malloc_uncollectable GC_PROTO((size_t size_in_bytes));
263GC_API GC_PTR GC_malloc_stubborn GC_PROTO((size_t size_in_bytes));
264
265/* The following is only defined if the library has been suitably */
266/* compiled: */
267GC_API GC_PTR GC_malloc_atomic_uncollectable GC_PROTO((size_t size_in_bytes));
268
269/* Explicitly deallocate an object. Dangerous if used incorrectly. */
270/* Requires a pointer to the base of an object. */
271/* If the argument is stubborn, it should not be changeable when freed. */
272/* An object should not be enabled for finalization when it is */
273/* explicitly deallocated. */
274/* GC_free(0) is a no-op, as required by ANSI C for free. */
275GC_API void GC_free GC_PROTO((GC_PTR object_addr));
276
277/*
278 * Stubborn objects may be changed only if the collector is explicitly informed.
279 * The collector is implicitly informed of coming change when such
280 * an object is first allocated. The following routines inform the
281 * collector that an object will no longer be changed, or that it will
282 * once again be changed. Only nonNIL pointer stores into the object
283 * are considered to be changes. The argument to GC_end_stubborn_change
284 * must be exactly the value returned by GC_malloc_stubborn or passed to
285 * GC_change_stubborn. (In the second case it may be an interior pointer
286 * within 512 bytes of the beginning of the objects.)
287 * There is a performance penalty for allowing more than
288 * one stubborn object to be changed at once, but it is acceptable to
289 * do so. The same applies to dropping stubborn objects that are still
290 * changeable.
291 */
292GC_API void GC_change_stubborn GC_PROTO((GC_PTR));
293GC_API void GC_end_stubborn_change GC_PROTO((GC_PTR));
294
295/* Return a pointer to the base (lowest address) of an object given */
296/* a pointer to a location within the object. */
297/* I.e. map an interior pointer to the corresponding base pointer. */
298/* Note that with debugging allocation, this returns a pointer to the */
299/* actual base of the object, i.e. the debug information, not to */
300/* the base of the user object. */
301/* Return 0 if displaced_pointer doesn't point to within a valid */
302/* object. */
303GC_API GC_PTR GC_base GC_PROTO((GC_PTR displaced_pointer));
304
305/* Given a pointer to the base of an object, return its size in bytes. */
306/* The returned size may be slightly larger than what was originally */
307/* requested. */
308GC_API size_t GC_size GC_PROTO((GC_PTR object_addr));
309
310/* For compatibility with C library. This is occasionally faster than */
311/* a malloc followed by a bcopy. But if you rely on that, either here */
312/* or with the standard C library, your code is broken. In my */
313/* opinion, it shouldn't have been invented, but now we're stuck. -HB */
314/* The resulting object has the same kind as the original. */
315/* If the argument is stubborn, the result will have changes enabled. */
316/* It is an error to have changes enabled for the original object. */
317/* Follows ANSI conventions for NULL old_object. */
318GC_API GC_PTR GC_realloc
319 GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes));
320
321/* Explicitly increase the heap size. */
322/* Returns 0 on failure, 1 on success. */
323GC_API int GC_expand_hp GC_PROTO((size_t number_of_bytes));
324
325/* Limit the heap size to n bytes. Useful when you're debugging, */
326/* especially on systems that don't handle running out of memory well. */
327/* n == 0 ==> unbounded. This is the default. */
328GC_API void GC_set_max_heap_size GC_PROTO((GC_word n));
329
330/* Inform the collector that a certain section of statically allocated */
331/* memory contains no pointers to garbage collected memory. Thus it */
332/* need not be scanned. This is sometimes important if the application */
333/* maps large read/write files into the address space, which could be */
334/* mistaken for dynamic library data segments on some systems. */
335GC_API void GC_exclude_static_roots GC_PROTO((GC_PTR start, GC_PTR finish));
336
337/* Clear the set of root segments. Wizards only. */
338GC_API void GC_clear_roots GC_PROTO((void));
339
340/* Add a root segment. Wizards only. */
341GC_API void GC_add_roots GC_PROTO((char * low_address,
342 char * high_address_plus_1));
343
344/* Add a displacement to the set of those considered valid by the */
345/* collector. GC_register_displacement(n) means that if p was returned */
346/* by GC_malloc, then (char *)p + n will be considered to be a valid */
347/* pointer to n. N must be small and less than the size of p. */
348/* (All pointers to the interior of objects from the stack are */
349/* considered valid in any case. This applies to heap objects and */
350/* static data.) */
351/* Preferably, this should be called before any other GC procedures. */
352/* Calling it later adds to the probability of excess memory */
353/* retention. */
354/* This is a no-op if the collector was compiled with recognition of */
355/* arbitrary interior pointers enabled, which is now the default. */
356GC_API void GC_register_displacement GC_PROTO((GC_word n));
357
358/* The following version should be used if any debugging allocation is */
359/* being done. */
360GC_API void GC_debug_register_displacement GC_PROTO((GC_word n));
361
362/* Explicitly trigger a full, world-stop collection. */
363GC_API void GC_gcollect GC_PROTO((void));
364
365/* Trigger a full world-stopped collection. Abort the collection if */
366/* and when stop_func returns a nonzero value. Stop_func will be */
367/* called frequently, and should be reasonably fast. This works even */
368/* if virtual dirty bits, and hence incremental collection is not */
369/* available for this architecture. Collections can be aborted faster */
370/* than normal pause times for incremental collection. However, */
371/* aborted collections do no useful work; the next collection needs */
372/* to start from the beginning. */
373/* Return 0 if the collection was aborted, 1 if it succeeded. */
374typedef int (* GC_stop_func) GC_PROTO((void));
375GC_API int GC_try_to_collect GC_PROTO((GC_stop_func stop_func));
376
377/* Return the number of bytes in the heap. Excludes collector private */
378/* data structures. Includes empty blocks and fragmentation loss. */
379/* Includes some pages that were allocated but never written. */
380GC_API size_t GC_get_heap_size GC_PROTO((void));
381
382/* Return a lower bound on the number of free bytes in the heap. */
383GC_API size_t GC_get_free_bytes GC_PROTO((void));
384
385/* Return the number of bytes allocated since the last collection. */
386GC_API size_t GC_get_bytes_since_gc GC_PROTO((void));
387
388/* Return the total number of bytes allocated in this process. */
389/* Never decreases, except due to wrapping. */
390GC_API size_t GC_get_total_bytes GC_PROTO((void));
391
392/* Disable garbage collection. Even GC_gcollect calls will be */
393/* ineffective. */
394GC_API void GC_disable GC_PROTO((void));
395
396/* Reenable garbage collection. GC_disable() and GC_enable() calls */
397/* nest. Garbage collection is enabled if the number of calls to both */
398/* functions is equal. */
399GC_API void GC_enable GC_PROTO((void));
400
401/* Enable incremental/generational collection. */
402/* Not advisable unless dirty bits are */
403/* available or most heap objects are */
404/* pointerfree(atomic) or immutable. */
405/* Don't use in leak finding mode. */
406/* Ignored if GC_dont_gc is true. */
407/* Only the generational piece of this is */
408/* functional if GC_parallel is TRUE */
409/* or if GC_time_limit is GC_TIME_UNLIMITED. */
410/* Causes GC_local_gcj_malloc() to revert to */
411/* locked allocation. Must be called */
412/* before any GC_local_gcj_malloc() calls. */
413GC_API void GC_enable_incremental GC_PROTO((void));
414
415/* Does incremental mode write-protect pages? Returns zero or */
416/* more of the following, or'ed together: */
417#define GC_PROTECTS_POINTER_HEAP 1 /* May protect non-atomic objs. */
418#define GC_PROTECTS_PTRFREE_HEAP 2
419#define GC_PROTECTS_STATIC_DATA 4 /* Currently never. */
420#define GC_PROTECTS_STACK 8 /* Probably impractical. */
421
422#define GC_PROTECTS_NONE 0
423GC_API int GC_incremental_protection_needs GC_PROTO((void));
424
425/* Perform some garbage collection work, if appropriate. */
426/* Return 0 if there is no more work to be done. */
427/* Typically performs an amount of work corresponding roughly */
428/* to marking from one page. May do more work if further */
429/* progress requires it, e.g. if incremental collection is */
430/* disabled. It is reasonable to call this in a wait loop */
431/* until it returns 0. */
432GC_API int GC_collect_a_little GC_PROTO((void));
433
434/* Allocate an object of size lb bytes. The client guarantees that */
435/* as long as the object is live, it will be referenced by a pointer */
436/* that points to somewhere within the first 256 bytes of the object. */
437/* (This should normally be declared volatile to prevent the compiler */
438/* from invalidating this assertion.) This routine is only useful */
439/* if a large array is being allocated. It reduces the chance of */
440/* accidentally retaining such an array as a result of scanning an */
441/* integer that happens to be an address inside the array. (Actually, */
442/* it reduces the chance of the allocator not finding space for such */
443/* an array, since it will try hard to avoid introducing such a false */
444/* reference.) On a SunOS 4.X or MS Windows system this is recommended */
445/* for arrays likely to be larger than 100K or so. For other systems, */
446/* or if the collector is not configured to recognize all interior */
447/* pointers, the threshold is normally much higher. */
448GC_API GC_PTR GC_malloc_ignore_off_page GC_PROTO((size_t lb));
449GC_API GC_PTR GC_malloc_atomic_ignore_off_page GC_PROTO((size_t lb));
450
451#if defined(__sgi) && !defined(__GNUC__) && _COMPILER_VERSION >= 720
452# define GC_ADD_CALLER
453# define GC_RETURN_ADDR (GC_word)__return_address
454#endif
455
456#ifdef __linux__
457# include <features.h>
458# if (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 1 || __GLIBC__ > 2) \
459 && !defined(__ia64__)
460# define GC_HAVE_BUILTIN_BACKTRACE
461# define GC_CAN_SAVE_CALL_STACKS
462# endif
463# if defined(__i386__) || defined(__x86_64__)
464# define GC_CAN_SAVE_CALL_STACKS
465# endif
466#endif
467
468#if defined(__sparc__)
469# define GC_CAN_SAVE_CALL_STACKS
470#endif
471
472/* If we're on an a platform on which we can't save call stacks, but */
473/* gcc is normally used, we go ahead and define GC_ADD_CALLER. */
474/* We make this decision independent of whether gcc is actually being */
475/* used, in order to keep the interface consistent, and allow mixing */
476/* of compilers. */
477/* This may also be desirable if it is possible but expensive to */
478/* retrieve the call chain. */
479#if (defined(__linux__) || defined(__NetBSD__) || defined(__OpenBSD__) \
480 || defined(__FreeBSD__)) & !defined(GC_CAN_SAVE_CALL_STACKS)
481# define GC_ADD_CALLER
482# if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 95)
483 /* gcc knows how to retrieve return address, but we don't know */
484 /* how to generate call stacks. */
485# define GC_RETURN_ADDR (GC_word)__builtin_return_address(0)
486# else
487 /* Just pass 0 for gcc compatibility. */
488# define GC_RETURN_ADDR 0
489# endif
490#endif
491
492#ifdef GC_ADD_CALLER
493# define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
494# define GC_EXTRA_PARAMS GC_word ra, GC_CONST char * s, int i
495#else
496# define GC_EXTRAS __FILE__, __LINE__
497# define GC_EXTRA_PARAMS GC_CONST char * s, int i
498#endif
499
500/* Debugging (annotated) allocation. GC_gcollect will check */
501/* objects allocated in this way for overwrites, etc. */
502GC_API GC_PTR GC_debug_malloc
503 GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
504GC_API GC_PTR GC_debug_malloc_atomic
505 GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
506GC_API GC_PTR GC_debug_malloc_uncollectable
507 GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
508GC_API GC_PTR GC_debug_malloc_stubborn
509 GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
510GC_API GC_PTR GC_debug_malloc_ignore_off_page
511 GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
512GC_API GC_PTR GC_debug_malloc_atomic_ignore_off_page
513 GC_PROTO((size_t size_in_bytes, GC_EXTRA_PARAMS));
514GC_API void GC_debug_free GC_PROTO((GC_PTR object_addr));
515GC_API GC_PTR GC_debug_realloc
516 GC_PROTO((GC_PTR old_object, size_t new_size_in_bytes,
517 GC_EXTRA_PARAMS));
518GC_API void GC_debug_change_stubborn GC_PROTO((GC_PTR));
519GC_API void GC_debug_end_stubborn_change GC_PROTO((GC_PTR));
520
521/* Routines that allocate objects with debug information (like the */
522/* above), but just fill in dummy file and line number information. */
523/* Thus they can serve as drop-in malloc/realloc replacements. This */
524/* can be useful for two reasons: */
525/* 1) It allows the collector to be built with DBG_HDRS_ALL defined */
526/* even if some allocation calls come from 3rd party libraries */
527/* that can't be recompiled. */
528/* 2) On some platforms, the file and line information is redundant, */
529/* since it can be reconstructed from a stack trace. On such */
530/* platforms it may be more convenient not to recompile, e.g. for */
531/* leak detection. This can be accomplished by instructing the */
532/* linker to replace malloc/realloc with these. */
533GC_API GC_PTR GC_debug_malloc_replacement GC_PROTO((size_t size_in_bytes));
534GC_API GC_PTR GC_debug_realloc_replacement
535 GC_PROTO((GC_PTR object_addr, size_t size_in_bytes));
536
537# ifdef GC_DEBUG
538# define GC_MALLOC(sz) GC_debug_malloc(sz, GC_EXTRAS)
539# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, GC_EXTRAS)
540# define GC_MALLOC_UNCOLLECTABLE(sz) \
541 GC_debug_malloc_uncollectable(sz, GC_EXTRAS)
542# define GC_MALLOC_IGNORE_OFF_PAGE(sz) \
543 GC_debug_malloc_ignore_off_page(sz, GC_EXTRAS)
544# define GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(sz) \
545 GC_debug_malloc_atomic_ignore_off_page(sz, GC_EXTRAS)
546# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, GC_EXTRAS)
547# define GC_FREE(p) GC_debug_free(p)
548# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
549 GC_debug_register_finalizer(p, f, d, of, od)
550# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
551 GC_debug_register_finalizer_ignore_self(p, f, d, of, od)
552# define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \
553 GC_debug_register_finalizer_no_order(p, f, d, of, od)
554# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS);
555# define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
556# define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
557# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
558 GC_general_register_disappearing_link(link, GC_base(obj))
559# define GC_REGISTER_DISPLACEMENT(n) GC_debug_register_displacement(n)
560# else
561# define GC_MALLOC(sz) GC_malloc(sz)
562# define GC_MALLOC_ATOMIC(sz) GC_malloc_atomic(sz)
563# define GC_MALLOC_UNCOLLECTABLE(sz) GC_malloc_uncollectable(sz)
564# define GC_MALLOC_IGNORE_OFF_PAGE(sz) \
565 GC_malloc_ignore_off_page(sz)
566# define GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(sz) \
567 GC_malloc_atomic_ignore_off_page(sz)
568# define GC_REALLOC(old, sz) GC_realloc(old, sz)
569# define GC_FREE(p) GC_free(p)
570# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
571 GC_register_finalizer(p, f, d, of, od)
572# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
573 GC_register_finalizer_ignore_self(p, f, d, of, od)
574# define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \
575 GC_register_finalizer_no_order(p, f, d, of, od)
576# define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz)
577# define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
578# define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p)
579# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
580 GC_general_register_disappearing_link(link, obj)
581# define GC_REGISTER_DISPLACEMENT(n) GC_register_displacement(n)
582# endif
583/* The following are included because they are often convenient, and */
584/* reduce the chance for a misspecified size argument. But calls may */
585/* expand to something syntactically incorrect if t is a complicated */
586/* type expression. */
587# define GC_NEW(t) (t *)GC_MALLOC(sizeof (t))
588# define GC_NEW_ATOMIC(t) (t *)GC_MALLOC_ATOMIC(sizeof (t))
589# define GC_NEW_STUBBORN(t) (t *)GC_MALLOC_STUBBORN(sizeof (t))
590# define GC_NEW_UNCOLLECTABLE(t) (t *)GC_MALLOC_UNCOLLECTABLE(sizeof (t))
591
592/* Finalization. Some of these primitives are grossly unsafe. */
593/* The idea is to make them both cheap, and sufficient to build */
594/* a safer layer, closer to PCedar finalization. */
595/* The interface represents my conclusions from a long discussion */
596/* with Alan Demers, Dan Greene, Carl Hauser, Barry Hayes, */
597/* Christian Jacobi, and Russ Atkinson. It's not perfect, and */
598/* probably nobody else agrees with it. Hans-J. Boehm 3/13/92 */
599typedef void (*GC_finalization_proc)
600 GC_PROTO((GC_PTR obj, GC_PTR client_data));
601
602GC_API void GC_register_finalizer
603 GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
604 GC_finalization_proc *ofn, GC_PTR *ocd));
605GC_API void GC_debug_register_finalizer
606 GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
607 GC_finalization_proc *ofn, GC_PTR *ocd));
608 /* When obj is no longer accessible, invoke */
609 /* (*fn)(obj, cd). If a and b are inaccessible, and */
610 /* a points to b (after disappearing links have been */
611 /* made to disappear), then only a will be */
612 /* finalized. (If this does not create any new */
613 /* pointers to b, then b will be finalized after the */
614 /* next collection.) Any finalizable object that */
615 /* is reachable from itself by following one or more */
616 /* pointers will not be finalized (or collected). */
617 /* Thus cycles involving finalizable objects should */
618 /* be avoided, or broken by disappearing links. */
619 /* All but the last finalizer registered for an object */
620 /* is ignored. */
621 /* Finalization may be removed by passing 0 as fn. */
622 /* Finalizers are implicitly unregistered just before */
623 /* they are invoked. */
624 /* The old finalizer and client data are stored in */
625 /* *ofn and *ocd. */
626 /* Fn is never invoked on an accessible object, */
627 /* provided hidden pointers are converted to real */
628 /* pointers only if the allocation lock is held, and */
629 /* such conversions are not performed by finalization */
630 /* routines. */
631 /* If GC_register_finalizer is aborted as a result of */
632 /* a signal, the object may be left with no */
633 /* finalization, even if neither the old nor new */
634 /* finalizer were NULL. */
635 /* Obj should be the nonNULL starting address of an */
636 /* object allocated by GC_malloc or friends. */
637 /* Note that any garbage collectable object referenced */
638 /* by cd will be considered accessible until the */
639 /* finalizer is invoked. */
640
641/* Another version of the above follows. It ignores */
642/* self-cycles, i.e. pointers from a finalizable object to */
643/* itself. There is a stylistic argument that this is wrong, */
644/* but it's unavoidable for C++, since the compiler may */
645/* silently introduce these. It's also benign in that specific */
646/* case. And it helps if finalizable objects are split to */
647/* avoid cycles. */
648/* Note that cd will still be viewed as accessible, even if it */
649/* refers to the object itself. */
650GC_API void GC_register_finalizer_ignore_self
651 GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
652 GC_finalization_proc *ofn, GC_PTR *ocd));
653GC_API void GC_debug_register_finalizer_ignore_self
654 GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
655 GC_finalization_proc *ofn, GC_PTR *ocd));
656
657/* Another version of the above. It ignores all cycles. */
658/* It should probably only be used by Java implementations. */
659/* Note that cd will still be viewed as accessible, even if it */
660/* refers to the object itself. */
661GC_API void GC_register_finalizer_no_order
662 GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
663 GC_finalization_proc *ofn, GC_PTR *ocd));
664GC_API void GC_debug_register_finalizer_no_order
665 GC_PROTO((GC_PTR obj, GC_finalization_proc fn, GC_PTR cd,
666 GC_finalization_proc *ofn, GC_PTR *ocd));
667
668
669/* The following routine may be used to break cycles between */
670/* finalizable objects, thus causing cyclic finalizable */
671/* objects to be finalized in the correct order. Standard */
672/* use involves calling GC_register_disappearing_link(&p), */
673/* where p is a pointer that is not followed by finalization */
674/* code, and should not be considered in determining */
675/* finalization order. */
676GC_API int GC_register_disappearing_link GC_PROTO((GC_PTR * /* link */));
677 /* Link should point to a field of a heap allocated */
678 /* object obj. *link will be cleared when obj is */
679 /* found to be inaccessible. This happens BEFORE any */
680 /* finalization code is invoked, and BEFORE any */
681 /* decisions about finalization order are made. */
682 /* This is useful in telling the finalizer that */
683 /* some pointers are not essential for proper */
684 /* finalization. This may avoid finalization cycles. */
685 /* Note that obj may be resurrected by another */
686 /* finalizer, and thus the clearing of *link may */
687 /* be visible to non-finalization code. */
688 /* There's an argument that an arbitrary action should */
689 /* be allowed here, instead of just clearing a pointer. */
690 /* But this causes problems if that action alters, or */
691 /* examines connectivity. */
692 /* Returns 1 if link was already registered, 0 */
693 /* otherwise. */
694 /* Only exists for backward compatibility. See below: */
695
696GC_API int GC_general_register_disappearing_link
697 GC_PROTO((GC_PTR * /* link */, GC_PTR obj));
698 /* A slight generalization of the above. *link is */
699 /* cleared when obj first becomes inaccessible. This */
700 /* can be used to implement weak pointers easily and */
701 /* safely. Typically link will point to a location */
702 /* holding a disguised pointer to obj. (A pointer */
703 /* inside an "atomic" object is effectively */
704 /* disguised.) In this way soft */
705 /* pointers are broken before any object */
706 /* reachable from them are finalized. Each link */
707 /* May be registered only once, i.e. with one obj */
708 /* value. This was added after a long email discussion */
709 /* with John Ellis. */
710 /* Obj must be a pointer to the first word of an object */
711 /* we allocated. It is unsafe to explicitly deallocate */
712 /* the object containing link. Explicitly deallocating */
713 /* obj may or may not cause link to eventually be */
714 /* cleared. */
715GC_API int GC_unregister_disappearing_link GC_PROTO((GC_PTR * /* link */));
716 /* Returns 0 if link was not actually registered. */
717 /* Undoes a registration by either of the above two */
718 /* routines. */
719
720/* Returns !=0 if GC_invoke_finalizers has something to do. */
721GC_API int GC_should_invoke_finalizers GC_PROTO((void));
722
723GC_API int GC_invoke_finalizers GC_PROTO((void));
724 /* Run finalizers for all objects that are ready to */
725 /* be finalized. Return the number of finalizers */
726 /* that were run. Normally this is also called */
727 /* implicitly during some allocations. If */
728 /* GC-finalize_on_demand is nonzero, it must be called */
729 /* explicitly. */
730
731/* GC_set_warn_proc can be used to redirect or filter warning messages. */
732/* p may not be a NULL pointer. */
733typedef void (*GC_warn_proc) GC_PROTO((char *msg, GC_word arg));
734GC_API GC_warn_proc GC_set_warn_proc GC_PROTO((GC_warn_proc p));
735 /* Returns old warning procedure. */
736
737GC_API GC_word GC_set_free_space_divisor GC_PROTO((GC_word value));
738 /* Set free_space_divisor. See above for definition. */
739 /* Returns old value. */
740
741/* The following is intended to be used by a higher level */
742/* (e.g. Java-like) finalization facility. It is expected */
743/* that finalization code will arrange for hidden pointers to */
744/* disappear. Otherwise objects can be accessed after they */
745/* have been collected. */
746/* Note that putting pointers in atomic objects or in */
747/* nonpointer slots of "typed" objects is equivalent to */
748/* disguising them in this way, and may have other advantages. */
749# if defined(I_HIDE_POINTERS) || defined(GC_I_HIDE_POINTERS)
750 typedef GC_word GC_hidden_pointer;
751# define HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
752# define REVEAL_POINTER(p) ((GC_PTR)(HIDE_POINTER(p)))
753 /* Converting a hidden pointer to a real pointer requires verifying */
754 /* that the object still exists. This involves acquiring the */
755 /* allocator lock to avoid a race with the collector. */
756# endif /* I_HIDE_POINTERS */
757
758typedef GC_PTR (*GC_fn_type) GC_PROTO((GC_PTR client_data));
759GC_API GC_PTR GC_call_with_alloc_lock
760 GC_PROTO((GC_fn_type fn, GC_PTR client_data));
761
762/* The following routines are primarily intended for use with a */
763/* preprocessor which inserts calls to check C pointer arithmetic. */
764
765/* Check that p and q point to the same object. */
766/* Fail conspicuously if they don't. */
767/* Returns the first argument. */
768/* Succeeds if neither p nor q points to the heap. */
769/* May succeed if both p and q point to between heap objects. */
770GC_API GC_PTR GC_same_obj GC_PROTO((GC_PTR p, GC_PTR q));
771
772/* Checked pointer pre- and post- increment operations. Note that */
773/* the second argument is in units of bytes, not multiples of the */
774/* object size. This should either be invoked from a macro, or the */
775/* call should be automatically generated. */
776GC_API GC_PTR GC_pre_incr GC_PROTO((GC_PTR *p, size_t how_much));
777GC_API GC_PTR GC_post_incr GC_PROTO((GC_PTR *p, size_t how_much));
778
779/* Check that p is visible */
780/* to the collector as a possibly pointer containing location. */
781/* If it isn't fail conspicuously. */
782/* Returns the argument in all cases. May erroneously succeed */
783/* in hard cases. (This is intended for debugging use with */
784/* untyped allocations. The idea is that it should be possible, though */
785/* slow, to add such a call to all indirect pointer stores.) */
786/* Currently useless for multithreaded worlds. */
787GC_API GC_PTR GC_is_visible GC_PROTO((GC_PTR p));
788
789/* Check that if p is a pointer to a heap page, then it points to */
790/* a valid displacement within a heap object. */
791/* Fail conspicuously if this property does not hold. */
792/* Uninteresting with GC_all_interior_pointers. */
793/* Always returns its argument. */
794GC_API GC_PTR GC_is_valid_displacement GC_PROTO((GC_PTR p));
795
796/* Safer, but slow, pointer addition. Probably useful mainly with */
797/* a preprocessor. Useful only for heap pointers. */
798#ifdef GC_DEBUG
799# define GC_PTR_ADD3(x, n, type_of_result) \
800 ((type_of_result)GC_same_obj((x)+(n), (x)))
801# define GC_PRE_INCR3(x, n, type_of_result) \
802 ((type_of_result)GC_pre_incr(&(x), (n)*sizeof(*x))
803# define GC_POST_INCR2(x, type_of_result) \
804 ((type_of_result)GC_post_incr(&(x), sizeof(*x))
805# ifdef __GNUC__
806# define GC_PTR_ADD(x, n) \
807 GC_PTR_ADD3(x, n, typeof(x))
808# define GC_PRE_INCR(x, n) \
809 GC_PRE_INCR3(x, n, typeof(x))
810# define GC_POST_INCR(x, n) \
811 GC_POST_INCR3(x, typeof(x))
812# else
813 /* We can't do this right without typeof, which ANSI */
814 /* decided was not sufficiently useful. Repeatedly */
815 /* mentioning the arguments seems too dangerous to be */
816 /* useful. So does not casting the result. */
817# define GC_PTR_ADD(x, n) ((x)+(n))
818# endif
819#else /* !GC_DEBUG */
820# define GC_PTR_ADD3(x, n, type_of_result) ((x)+(n))
821# define GC_PTR_ADD(x, n) ((x)+(n))
822# define GC_PRE_INCR3(x, n, type_of_result) ((x) += (n))
823# define GC_PRE_INCR(x, n) ((x) += (n))
824# define GC_POST_INCR2(x, n, type_of_result) ((x)++)
825# define GC_POST_INCR(x, n) ((x)++)
826#endif
827
828/* Safer assignment of a pointer to a nonstack location. */
829#ifdef GC_DEBUG
830# ifdef __STDC__
831# define GC_PTR_STORE(p, q) \
832 (*(void **)GC_is_visible(p) = GC_is_valid_displacement(q))
833# else
834# define GC_PTR_STORE(p, q) \
835 (*(char **)GC_is_visible(p) = GC_is_valid_displacement(q))
836# endif
837#else /* !GC_DEBUG */
838# define GC_PTR_STORE(p, q) *((p) = (q))
839#endif
840
841/* Fynctions called to report pointer checking errors */
842GC_API void (*GC_same_obj_print_proc) GC_PROTO((GC_PTR p, GC_PTR q));
843
844GC_API void (*GC_is_valid_displacement_print_proc)
845 GC_PROTO((GC_PTR p));
846
847GC_API void (*GC_is_visible_print_proc)
848 GC_PROTO((GC_PTR p));
849
850
851/* For pthread support, we generally need to intercept a number of */
852/* thread library calls. We do that here by macro defining them. */
853
854#if !defined(GC_USE_LD_WRAP) && \
855 (defined(GC_PTHREADS) || defined(GC_SOLARIS_THREADS))
856# include "gc_pthread_redirects.h"
857#endif
858
859# if defined(PCR) || defined(GC_SOLARIS_THREADS) || \
860 defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
861 /* Any flavor of threads except SRC_M3. */
862/* This returns a list of objects, linked through their first */
863/* word. Its use can greatly reduce lock contention problems, since */
864/* the allocation lock can be acquired and released many fewer times. */
865/* lb must be large enough to hold the pointer field. */
866/* It is used internally by gc_local_alloc.h, which provides a simpler */
867/* programming interface on Linux. */
868GC_PTR GC_malloc_many(size_t lb);
869#define GC_NEXT(p) (*(GC_PTR *)(p)) /* Retrieve the next element */
870 /* in returned list. */
871extern void GC_thr_init(); /* Needed for Solaris/X86 */
872
873#endif /* THREADS && !SRC_M3 */
874
875#if defined(GC_WIN32_THREADS)
876# include <windows.h>
877# include <winbase.h>
878
879 /*
880 * All threads must be created using GC_CreateThread, so that they will be
881 * recorded in the thread table. For backwards compatibility, this is not
882 * technically true if the GC is built as a dynamic library, since it can
883 * and does then use DllMain to keep track of thread creations. But new code
884 * should be built to call GC_CreateThread.
885 */
886 HANDLE WINAPI GC_CreateThread(
887 LPSECURITY_ATTRIBUTES lpThreadAttributes,
888 DWORD dwStackSize, LPTHREAD_START_ROUTINE lpStartAddress,
889 LPVOID lpParameter, DWORD dwCreationFlags, LPDWORD lpThreadId );
890
891# if defined(_WIN32_WCE)
892 /*
893 * win32_threads.c implements the real WinMain, which will start a new thread
894 * to call GC_WinMain after initializing the garbage collector.
895 */
896 int WINAPI GC_WinMain(
897 HINSTANCE hInstance,
898 HINSTANCE hPrevInstance,
899 LPWSTR lpCmdLine,
900 int nCmdShow );
901
902# ifndef GC_BUILD
903# define WinMain GC_WinMain
904# define CreateThread GC_CreateThread
905# endif
906# endif /* defined(_WIN32_WCE) */
907
908#endif /* defined(GC_WIN32_THREADS) */
909
910/*
911 * If you are planning on putting
912 * the collector in a SunOS 5 dynamic library, you need to call GC_INIT()
913 * from the statically loaded program section.
914 * This circumvents a Solaris 2.X (X<=4) linker bug.
915 */
916#if defined(sparc) || defined(__sparc)
917# define GC_INIT() { extern end, etext; \
918 GC_noop(&end, &etext); }
919#else
920# if defined(__CYGWIN32__) && defined(GC_USE_DLL) || defined (_AIX)
921 /*
922 * Similarly gnu-win32 DLLs need explicit initialization from
923 * the main program, as does AIX.
924 */
925# define GC_INIT() { GC_add_roots(DATASTART, DATAEND); }
926# else
927# define GC_INIT()
928# endif
929#endif
930
931#if !defined(_WIN32_WCE) \
932 && ((defined(_MSDOS) || defined(_MSC_VER)) && (_M_IX86 >= 300) \
933 || defined(_WIN32) && !defined(__CYGWIN32__) && !defined(__CYGWIN__))
934 /* win32S may not free all resources on process exit. */
935 /* This explicitly deallocates the heap. */
936 GC_API void GC_win32_free_heap ();
937#endif
938
939#if ( defined(_AMIGA) && !defined(GC_AMIGA_MAKINGLIB) )
940 /* Allocation really goes through GC_amiga_allocwrapper_do */
941# include "gc_amiga_redirects.h"
942#endif
943
944#if defined(GC_REDIRECT_TO_LOCAL) && !defined(GC_LOCAL_ALLOC_H)
945# include "gc_local_alloc.h"
946#endif
947
948#ifdef __cplusplus
949 } /* end of extern "C" */
950#endif
951
952#endif /* _GC_H */
diff --git a/gc/include/gc_alloc.h b/gc/include/gc_alloc.h
deleted file mode 100644
index c50a7589646..00000000000
--- a/gc/include/gc_alloc.h
+++ /dev/null
@@ -1,383 +0,0 @@
1/*
2 * Copyright (c) 1996-1998 by Silicon Graphics. All rights reserved.
3 *
4 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
5 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
6 *
7 * Permission is hereby granted to use or copy this program
8 * for any purpose, provided the above notices are retained on all copies.
9 * Permission to modify the code and to distribute modified code is granted,
10 * provided the above notices are retained, and a notice that the code was
11 * modified is included with the above copyright notice.
12 */
13
14//
15// This is a C++ header file that is intended to replace the SGI STL
16// alloc.h. This assumes SGI STL version < 3.0.
17//
18// This assumes the collector has been compiled with -DATOMIC_UNCOLLECTABLE
19// and -DALL_INTERIOR_POINTERS. We also recommend
20// -DREDIRECT_MALLOC=GC_uncollectable_malloc.
21//
22// Some of this could be faster in the explicit deallocation case. In particular,
23// we spend too much time clearing objects on the free lists. That could be avoided.
24//
25// This uses template classes with static members, and hence does not work
26// with g++ 2.7.2 and earlier.
27//
28// This code assumes that the collector itself has been compiled with a
29// compiler that defines __STDC__ .
30//
31
32#include "gc.h"
33
34#ifndef GC_ALLOC_H
35
36#define GC_ALLOC_H
37#define __ALLOC_H // Prevent inclusion of the default version. Ugly.
38#define __SGI_STL_ALLOC_H
39#define __SGI_STL_INTERNAL_ALLOC_H
40
41#ifndef __ALLOC
42# define __ALLOC alloc
43#endif
44
45#include <stddef.h>
46#include <string.h>
47
48// The following is just replicated from the conventional SGI alloc.h:
49
50template<class T, class alloc>
51class simple_alloc {
52
53public:
54 static T *allocate(size_t n)
55 { return 0 == n? 0 : (T*) alloc::allocate(n * sizeof (T)); }
56 static T *allocate(void)
57 { return (T*) alloc::allocate(sizeof (T)); }
58 static void deallocate(T *p, size_t n)
59 { if (0 != n) alloc::deallocate(p, n * sizeof (T)); }
60 static void deallocate(T *p)
61 { alloc::deallocate(p, sizeof (T)); }
62};
63
64#include "gc.h"
65
66// The following need to match collector data structures.
67// We can't include gc_priv.h, since that pulls in way too much stuff.
68// This should eventually be factored out into another include file.
69
70extern "C" {
71 extern void ** const GC_objfreelist_ptr;
72 extern void ** const GC_aobjfreelist_ptr;
73 extern void ** const GC_uobjfreelist_ptr;
74 extern void ** const GC_auobjfreelist_ptr;
75
76 extern void GC_incr_words_allocd(size_t words);
77 extern void GC_incr_mem_freed(size_t words);
78
79 extern char * GC_generic_malloc_words_small(size_t word, int kind);
80}
81
82// Object kinds; must match PTRFREE, NORMAL, UNCOLLECTABLE, and
83// AUNCOLLECTABLE in gc_priv.h.
84
85enum { GC_PTRFREE = 0, GC_NORMAL = 1, GC_UNCOLLECTABLE = 2,
86 GC_AUNCOLLECTABLE = 3 };
87
88enum { GC_max_fast_bytes = 255 };
89
90enum { GC_bytes_per_word = sizeof(char *) };
91
92enum { GC_byte_alignment = 8 };
93
94enum { GC_word_alignment = GC_byte_alignment/GC_bytes_per_word };
95
96inline void * &GC_obj_link(void * p)
97{ return *(void **)p; }
98
99// Compute a number of words >= n+1 bytes.
100// The +1 allows for pointers one past the end.
101inline size_t GC_round_up(size_t n)
102{
103 return ((n + GC_byte_alignment)/GC_byte_alignment)*GC_word_alignment;
104}
105
106// The same but don't allow for extra byte.
107inline size_t GC_round_up_uncollectable(size_t n)
108{
109 return ((n + GC_byte_alignment - 1)/GC_byte_alignment)*GC_word_alignment;
110}
111
112template <int dummy>
113class GC_aux_template {
114public:
115 // File local count of allocated words. Occasionally this is
116 // added into the global count. A separate count is necessary since the
117 // real one must be updated with a procedure call.
118 static size_t GC_words_recently_allocd;
119
120 // Same for uncollectable mmory. Not yet reflected in either
121 // GC_words_recently_allocd or GC_non_gc_bytes.
122 static size_t GC_uncollectable_words_recently_allocd;
123
124 // Similar counter for explicitly deallocated memory.
125 static size_t GC_mem_recently_freed;
126
127 // Again for uncollectable memory.
128 static size_t GC_uncollectable_mem_recently_freed;
129
130 static void * GC_out_of_line_malloc(size_t nwords, int kind);
131};
132
133template <int dummy>
134size_t GC_aux_template<dummy>::GC_words_recently_allocd = 0;
135
136template <int dummy>
137size_t GC_aux_template<dummy>::GC_uncollectable_words_recently_allocd = 0;
138
139template <int dummy>
140size_t GC_aux_template<dummy>::GC_mem_recently_freed = 0;
141
142template <int dummy>
143size_t GC_aux_template<dummy>::GC_uncollectable_mem_recently_freed = 0;
144
145template <int dummy>
146void * GC_aux_template<dummy>::GC_out_of_line_malloc(size_t nwords, int kind)
147{
148 GC_words_recently_allocd += GC_uncollectable_words_recently_allocd;
149 GC_non_gc_bytes +=
150 GC_bytes_per_word * GC_uncollectable_words_recently_allocd;
151 GC_uncollectable_words_recently_allocd = 0;
152
153 GC_mem_recently_freed += GC_uncollectable_mem_recently_freed;
154 GC_non_gc_bytes -=
155 GC_bytes_per_word * GC_uncollectable_mem_recently_freed;
156 GC_uncollectable_mem_recently_freed = 0;
157
158 GC_incr_words_allocd(GC_words_recently_allocd);
159 GC_words_recently_allocd = 0;
160
161 GC_incr_mem_freed(GC_mem_recently_freed);
162 GC_mem_recently_freed = 0;
163
164 return GC_generic_malloc_words_small(nwords, kind);
165}
166
167typedef GC_aux_template<0> GC_aux;
168
169// A fast, single-threaded, garbage-collected allocator
170// We assume the first word will be immediately overwritten.
171// In this version, deallocation is not a noop, and explicit
172// deallocation is likely to help performance.
173template <int dummy>
174class single_client_gc_alloc_template {
175 public:
176 static void * allocate(size_t n)
177 {
178 size_t nwords = GC_round_up(n);
179 void ** flh;
180 void * op;
181
182 if (n > GC_max_fast_bytes) return GC_malloc(n);
183 flh = GC_objfreelist_ptr + nwords;
184 if (0 == (op = *flh)) {
185 return GC_aux::GC_out_of_line_malloc(nwords, GC_NORMAL);
186 }
187 *flh = GC_obj_link(op);
188 GC_aux::GC_words_recently_allocd += nwords;
189 return op;
190 }
191 static void * ptr_free_allocate(size_t n)
192 {
193 size_t nwords = GC_round_up(n);
194 void ** flh;
195 void * op;
196
197 if (n > GC_max_fast_bytes) return GC_malloc_atomic(n);
198 flh = GC_aobjfreelist_ptr + nwords;
199 if (0 == (op = *flh)) {
200 return GC_aux::GC_out_of_line_malloc(nwords, GC_PTRFREE);
201 }
202 *flh = GC_obj_link(op);
203 GC_aux::GC_words_recently_allocd += nwords;
204 return op;
205 }
206 static void deallocate(void *p, size_t n)
207 {
208 size_t nwords = GC_round_up(n);
209 void ** flh;
210
211 if (n > GC_max_fast_bytes) {
212 GC_free(p);
213 } else {
214 flh = GC_objfreelist_ptr + nwords;
215 GC_obj_link(p) = *flh;
216 memset((char *)p + GC_bytes_per_word, 0,
217 GC_bytes_per_word * (nwords - 1));
218 *flh = p;
219 GC_aux::GC_mem_recently_freed += nwords;
220 }
221 }
222 static void ptr_free_deallocate(void *p, size_t n)
223 {
224 size_t nwords = GC_round_up(n);
225 void ** flh;
226
227 if (n > GC_max_fast_bytes) {
228 GC_free(p);
229 } else {
230 flh = GC_aobjfreelist_ptr + nwords;
231 GC_obj_link(p) = *flh;
232 *flh = p;
233 GC_aux::GC_mem_recently_freed += nwords;
234 }
235 }
236};
237
238typedef single_client_gc_alloc_template<0> single_client_gc_alloc;
239
240// Once more, for uncollectable objects.
241template <int dummy>
242class single_client_alloc_template {
243 public:
244 static void * allocate(size_t n)
245 {
246 size_t nwords = GC_round_up_uncollectable(n);
247 void ** flh;
248 void * op;
249
250 if (n > GC_max_fast_bytes) return GC_malloc_uncollectable(n);
251 flh = GC_uobjfreelist_ptr + nwords;
252 if (0 == (op = *flh)) {
253 return GC_aux::GC_out_of_line_malloc(nwords, GC_UNCOLLECTABLE);
254 }
255 *flh = GC_obj_link(op);
256 GC_aux::GC_uncollectable_words_recently_allocd += nwords;
257 return op;
258 }
259 static void * ptr_free_allocate(size_t n)
260 {
261 size_t nwords = GC_round_up_uncollectable(n);
262 void ** flh;
263 void * op;
264
265 if (n > GC_max_fast_bytes) return GC_malloc_atomic_uncollectable(n);
266 flh = GC_auobjfreelist_ptr + nwords;
267 if (0 == (op = *flh)) {
268 return GC_aux::GC_out_of_line_malloc(nwords, GC_AUNCOLLECTABLE);
269 }
270 *flh = GC_obj_link(op);
271 GC_aux::GC_uncollectable_words_recently_allocd += nwords;
272 return op;
273 }
274 static void deallocate(void *p, size_t n)
275 {
276 size_t nwords = GC_round_up_uncollectable(n);
277 void ** flh;
278
279 if (n > GC_max_fast_bytes) {
280 GC_free(p);
281 } else {
282 flh = GC_uobjfreelist_ptr + nwords;
283 GC_obj_link(p) = *flh;
284 *flh = p;
285 GC_aux::GC_uncollectable_mem_recently_freed += nwords;
286 }
287 }
288 static void ptr_free_deallocate(void *p, size_t n)
289 {
290 size_t nwords = GC_round_up_uncollectable(n);
291 void ** flh;
292
293 if (n > GC_max_fast_bytes) {
294 GC_free(p);
295 } else {
296 flh = GC_auobjfreelist_ptr + nwords;
297 GC_obj_link(p) = *flh;
298 *flh = p;
299 GC_aux::GC_uncollectable_mem_recently_freed += nwords;
300 }
301 }
302};
303
304typedef single_client_alloc_template<0> single_client_alloc;
305
306template < int dummy >
307class gc_alloc_template {
308 public:
309 static void * allocate(size_t n) { return GC_malloc(n); }
310 static void * ptr_free_allocate(size_t n)
311 { return GC_malloc_atomic(n); }
312 static void deallocate(void *, size_t) { }
313 static void ptr_free_deallocate(void *, size_t) { }
314};
315
316typedef gc_alloc_template < 0 > gc_alloc;
317
318template < int dummy >
319class alloc_template {
320 public:
321 static void * allocate(size_t n) { return GC_malloc_uncollectable(n); }
322 static void * ptr_free_allocate(size_t n)
323 { return GC_malloc_atomic_uncollectable(n); }
324 static void deallocate(void *p, size_t) { GC_free(p); }
325 static void ptr_free_deallocate(void *p, size_t) { GC_free(p); }
326};
327
328typedef alloc_template < 0 > alloc;
329
330#ifdef _SGI_SOURCE
331
332// We want to specialize simple_alloc so that it does the right thing
333// for all pointerfree types. At the moment there is no portable way to
334// even approximate that. The following approximation should work for
335// SGI compilers, and perhaps some others.
336
337# define __GC_SPECIALIZE(T,alloc) \
338class simple_alloc<T, alloc> { \
339public: \
340 static T *allocate(size_t n) \
341 { return 0 == n? 0 : \
342 (T*) alloc::ptr_free_allocate(n * sizeof (T)); } \
343 static T *allocate(void) \
344 { return (T*) alloc::ptr_free_allocate(sizeof (T)); } \
345 static void deallocate(T *p, size_t n) \
346 { if (0 != n) alloc::ptr_free_deallocate(p, n * sizeof (T)); } \
347 static void deallocate(T *p) \
348 { alloc::ptr_free_deallocate(p, sizeof (T)); } \
349};
350
351__GC_SPECIALIZE(char, gc_alloc)
352__GC_SPECIALIZE(int, gc_alloc)
353__GC_SPECIALIZE(unsigned, gc_alloc)
354__GC_SPECIALIZE(float, gc_alloc)
355__GC_SPECIALIZE(double, gc_alloc)
356
357__GC_SPECIALIZE(char, alloc)
358__GC_SPECIALIZE(int, alloc)
359__GC_SPECIALIZE(unsigned, alloc)
360__GC_SPECIALIZE(float, alloc)
361__GC_SPECIALIZE(double, alloc)
362
363__GC_SPECIALIZE(char, single_client_gc_alloc)
364__GC_SPECIALIZE(int, single_client_gc_alloc)
365__GC_SPECIALIZE(unsigned, single_client_gc_alloc)
366__GC_SPECIALIZE(float, single_client_gc_alloc)
367__GC_SPECIALIZE(double, single_client_gc_alloc)
368
369__GC_SPECIALIZE(char, single_client_alloc)
370__GC_SPECIALIZE(int, single_client_alloc)
371__GC_SPECIALIZE(unsigned, single_client_alloc)
372__GC_SPECIALIZE(float, single_client_alloc)
373__GC_SPECIALIZE(double, single_client_alloc)
374
375#ifdef __STL_USE_STD_ALLOCATORS
376
377???copy stuff from stl_alloc.h or remove it to a different file ???
378
379#endif /* __STL_USE_STD_ALLOCATORS */
380
381#endif /* _SGI_SOURCE */
382
383#endif /* GC_ALLOC_H */
diff --git a/gc/include/gc_allocator.h b/gc/include/gc_allocator.h
deleted file mode 100644
index 87c85099381..00000000000
--- a/gc/include/gc_allocator.h
+++ /dev/null
@@ -1,232 +0,0 @@
1/*
2 * Copyright (c) 1996-1997
3 * Silicon Graphics Computer Systems, Inc.
4 *
5 * Permission to use, copy, modify, distribute and sell this software
6 * and its documentation for any purpose is hereby granted without fee,
7 * provided that the above copyright notice appear in all copies and
8 * that both that copyright notice and this permission notice appear
9 * in supporting documentation. Silicon Graphics makes no
10 * representations about the suitability of this software for any
11 * purpose. It is provided "as is" without express or implied warranty.
12 *
13 * Copyright (c) 2002
14 * Hewlett-Packard Company
15 *
16 * Permission to use, copy, modify, distribute and sell this software
17 * and its documentation for any purpose is hereby granted without fee,
18 * provided that the above copyright notice appear in all copies and
19 * that both that copyright notice and this permission notice appear
20 * in supporting documentation. Hewlett-Packard Company makes no
21 * representations about the suitability of this software for any
22 * purpose. It is provided "as is" without express or implied warranty.
23 */
24
25/*
26 * This implements standard-conforming allocators that interact with
27 * the garbage collector. Gc_alloctor<T> allocates garbage-collectable
28 * objects of type T. Traceable_allocator<T> allocates objects that
29 * are not temselves garbage collected, but are scanned by the
30 * collector for pointers to collectable objects. Traceable_alloc
31 * should be used for explicitly managed STL containers that may
32 * point to collectable objects.
33 *
34 * This code was derived from an earlier version of the GNU C++ standard
35 * library, which itself was derived from the SGI STL implementation.
36 */
37
38#include "gc.h" // For size_t
39
40/* First some helpers to allow us to dispatch on whether or not a type
41 * is known to be pointerfree.
42 * These are private, except that the client may invoke the
43 * GC_DECLARE_PTRFREE macro.
44 */
45
46struct GC_true_type {};
47struct GC_false_type {};
48
49template <class GC_tp>
50struct GC_type_traits {
51 GC_false_type GC_is_ptr_free;
52};
53
54# define GC_DECLARE_PTRFREE(T) \
55template<> struct GC_type_traits<T> { GC_true_type GC_is_ptr_free; }
56
57GC_DECLARE_PTRFREE(signed char);
58GC_DECLARE_PTRFREE(unsigned char);
59GC_DECLARE_PTRFREE(signed short);
60GC_DECLARE_PTRFREE(unsigned short);
61GC_DECLARE_PTRFREE(signed int);
62GC_DECLARE_PTRFREE(unsigned int);
63GC_DECLARE_PTRFREE(signed long);
64GC_DECLARE_PTRFREE(unsigned long);
65GC_DECLARE_PTRFREE(float);
66GC_DECLARE_PTRFREE(double);
67/* The client may want to add others. */
68
69// In the following GC_Tp is GC_true_type iff we are allocating a
70// pointerfree object.
71template <class GC_Tp>
72inline void * GC_selective_alloc(size_t n, GC_Tp) {
73 return GC_MALLOC(n);
74}
75
76template <>
77inline void * GC_selective_alloc<GC_true_type>(size_t n, GC_true_type) {
78 return GC_MALLOC_ATOMIC(n);
79}
80
81/* Now the public gc_allocator<T> class:
82 */
83template <class GC_Tp>
84class gc_allocator {
85public:
86 typedef size_t size_type;
87 typedef ptrdiff_t difference_type;
88 typedef GC_Tp* pointer;
89 typedef const GC_Tp* const_pointer;
90 typedef GC_Tp& reference;
91 typedef const GC_Tp& const_reference;
92 typedef GC_Tp value_type;
93
94 template <class GC_Tp1> struct rebind {
95 typedef gc_allocator<GC_Tp1> other;
96 };
97
98 gc_allocator() {}
99# ifndef _MSC_VER
100 // I'm not sure why this is needed here in addition to the following.
101 // The standard specifies it for the standard allocator, but VC++ rejects
102 // it. -HB
103 gc_allocator(const gc_allocator&) throw() {}
104# endif
105 template <class GC_Tp1> gc_allocator(const gc_allocator<GC_Tp1>&) throw() {}
106 ~gc_allocator() throw() {}
107
108 pointer address(reference GC_x) const { return &GC_x; }
109 const_pointer address(const_reference GC_x) const { return &GC_x; }
110
111 // GC_n is permitted to be 0. The C++ standard says nothing about what
112 // the return value is when GC_n == 0.
113 GC_Tp* allocate(size_type GC_n, const void* = 0) {
114 GC_type_traits<GC_Tp> traits;
115 return static_cast<GC_Tp *>
116 (GC_selective_alloc(GC_n * sizeof(GC_Tp),
117 traits.GC_is_ptr_free));
118 }
119
120 // __p is not permitted to be a null pointer.
121 void deallocate(pointer __p, size_type GC_n)
122 { GC_FREE(__p); }
123
124 size_type max_size() const throw()
125 { return size_t(-1) / sizeof(GC_Tp); }
126
127 void construct(pointer __p, const GC_Tp& __val) { new(__p) GC_Tp(__val); }
128 void destroy(pointer __p) { __p->~GC_Tp(); }
129};
130
131template<>
132class gc_allocator<void> {
133 typedef size_t size_type;
134 typedef ptrdiff_t difference_type;
135 typedef void* pointer;
136 typedef const void* const_pointer;
137 typedef void value_type;
138
139 template <class GC_Tp1> struct rebind {
140 typedef gc_allocator<GC_Tp1> other;
141 };
142};
143
144
145template <class GC_T1, class GC_T2>
146inline bool operator==(const gc_allocator<GC_T1>&, const gc_allocator<GC_T2>&)
147{
148 return true;
149}
150
151template <class GC_T1, class GC_T2>
152inline bool operator!=(const gc_allocator<GC_T1>&, const gc_allocator<GC_T2>&)
153{
154 return false;
155}
156
157/*
158 * And the public traceable_allocator class.
159 */
160
161// Note that we currently don't specialize the pointer-free case, since a
162// pointer-free traceable container doesn't make that much sense,
163// though it could become an issue due to abstraction boundaries.
164template <class GC_Tp>
165class traceable_allocator {
166public:
167 typedef size_t size_type;
168 typedef ptrdiff_t difference_type;
169 typedef GC_Tp* pointer;
170 typedef const GC_Tp* const_pointer;
171 typedef GC_Tp& reference;
172 typedef const GC_Tp& const_reference;
173 typedef GC_Tp value_type;
174
175 template <class GC_Tp1> struct rebind {
176 typedef traceable_allocator<GC_Tp1> other;
177 };
178
179 traceable_allocator() throw() {}
180# ifndef _MSC_VER
181 traceable_allocator(const traceable_allocator&) throw() {}
182# endif
183 template <class GC_Tp1> traceable_allocator
184 (const traceable_allocator<GC_Tp1>&) throw() {}
185 ~traceable_allocator() throw() {}
186
187 pointer address(reference GC_x) const { return &GC_x; }
188 const_pointer address(const_reference GC_x) const { return &GC_x; }
189
190 // GC_n is permitted to be 0. The C++ standard says nothing about what
191 // the return value is when GC_n == 0.
192 GC_Tp* allocate(size_type GC_n, const void* = 0) {
193 return static_cast<GC_Tp*>(GC_MALLOC_UNCOLLECTABLE(GC_n * sizeof(GC_Tp)));
194 }
195
196 // __p is not permitted to be a null pointer.
197 void deallocate(pointer __p, size_type GC_n)
198 { GC_FREE(__p); }
199
200 size_type max_size() const throw()
201 { return size_t(-1) / sizeof(GC_Tp); }
202
203 void construct(pointer __p, const GC_Tp& __val) { new(__p) GC_Tp(__val); }
204 void destroy(pointer __p) { __p->~GC_Tp(); }
205};
206
207template<>
208class traceable_allocator<void> {
209 typedef size_t size_type;
210 typedef ptrdiff_t difference_type;
211 typedef void* pointer;
212 typedef const void* const_pointer;
213 typedef void value_type;
214
215 template <class GC_Tp1> struct rebind {
216 typedef traceable_allocator<GC_Tp1> other;
217 };
218};
219
220
221template <class GC_T1, class GC_T2>
222inline bool operator==(const traceable_allocator<GC_T1>&, const traceable_allocator<GC_T2>&)
223{
224 return true;
225}
226
227template <class GC_T1, class GC_T2>
228inline bool operator!=(const traceable_allocator<GC_T1>&, const traceable_allocator<GC_T2>&)
229{
230 return false;
231}
232
diff --git a/gc/include/gc_amiga_redirects.h b/gc/include/gc_amiga_redirects.h
deleted file mode 100644
index 9e975c8c832..00000000000
--- a/gc/include/gc_amiga_redirects.h
+++ /dev/null
@@ -1,30 +0,0 @@
1#ifndef GC_AMIGA_REDIRECTS_H
2
3# define GC_AMIGA_REDIRECTS_H
4
5# if ( defined(_AMIGA) && !defined(GC_AMIGA_MAKINGLIB) )
6 extern void *GC_amiga_realloc(void *old_object,size_t new_size_in_bytes);
7# define GC_realloc(a,b) GC_amiga_realloc(a,b)
8 extern void GC_amiga_set_toany(void (*func)(void));
9 extern int GC_amiga_free_space_divisor_inc;
10 extern void *(*GC_amiga_allocwrapper_do) \
11 (size_t size,void *(*AllocFunction)(size_t size2));
12# define GC_malloc(a) \
13 (*GC_amiga_allocwrapper_do)(a,GC_malloc)
14# define GC_malloc_atomic(a) \
15 (*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic)
16# define GC_malloc_uncollectable(a) \
17 (*GC_amiga_allocwrapper_do)(a,GC_malloc_uncollectable)
18# define GC_malloc_stubborn(a) \
19 (*GC_amiga_allocwrapper_do)(a,GC_malloc_stubborn)
20# define GC_malloc_atomic_uncollectable(a) \
21 (*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic_uncollectable)
22# define GC_malloc_ignore_off_page(a) \
23 (*GC_amiga_allocwrapper_do)(a,GC_malloc_ignore_off_page)
24# define GC_malloc_atomic_ignore_off_page(a) \
25 (*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic_ignore_off_page)
26# endif /* _AMIGA && !GC_AMIGA_MAKINGLIB */
27
28#endif /* GC_AMIGA_REDIRECTS_H */
29
30
diff --git a/gc/include/gc_backptr.h b/gc/include/gc_backptr.h
deleted file mode 100644
index 5899496e0fe..00000000000
--- a/gc/include/gc_backptr.h
+++ /dev/null
@@ -1,65 +0,0 @@
1/*
2 * This is a simple API to implement pointer back tracing, i.e.
3 * to answer questions such as "who is pointing to this" or
4 * "why is this object being retained by the collector"
5 *
6 * This API assumes that we have an ANSI C compiler.
7 *
8 * Most of these calls yield useful information only after
9 * a garbage collection. Usually the client will first force
10 * a full collection and then gather information, preferably
11 * before much intervening allocation.
12 *
13 * The implementation of the interface is only about 99.9999%
14 * correct. It is intended to be good enough for profiling,
15 * but is not intended to be used with production code.
16 *
17 * Results are likely to be much more useful if all allocation is
18 * accomplished through the debugging allocators.
19 *
20 * The implementation idea is due to A. Demers.
21 */
22
23#ifndef GC_BACKPTR_H
24#define GC_BACKPTR_H
25/* Store information about the object referencing dest in *base_p */
26/* and *offset_p. */
27/* If multiple objects or roots point to dest, the one reported */
28/* will be the last one used by the garbage collector to trace the */
29/* object. */
30/* source is root ==> *base_p = address, *offset_p = 0 */
31/* source is heap object ==> *base_p != 0, *offset_p = offset */
32/* Returns 1 on success, 0 if source couldn't be determined. */
33/* Dest can be any address within a heap object. */
34typedef enum { GC_UNREFERENCED, /* No reference info available. */
35 GC_NO_SPACE, /* Dest not allocated with debug alloc */
36 GC_REFD_FROM_ROOT, /* Referenced directly by root *base_p */
37 GC_REFD_FROM_REG, /* Referenced from a register, i.e. */
38 /* a root without an address. */
39 GC_REFD_FROM_HEAP, /* Referenced from another heap obj. */
40 GC_FINALIZER_REFD /* Finalizable and hence accessible. */
41} GC_ref_kind;
42
43GC_ref_kind GC_get_back_ptr_info(void *dest, void **base_p, size_t *offset_p);
44
45/* Generate a random heap address. */
46/* The resulting address is in the heap, but */
47/* not necessarily inside a valid object. */
48void * GC_generate_random_heap_address(void);
49
50/* Generate a random address inside a valid marked heap object. */
51void * GC_generate_random_valid_address(void);
52
53/* Force a garbage collection and generate a backtrace from a */
54/* random heap address. */
55/* This uses the GC logging mechanism (GC_printf) to produce */
56/* output. It can often be called from a debugger. The */
57/* source in dbg_mlc.c also serves as a sample client. */
58void GC_generate_random_backtrace(void);
59
60/* Print a backtrace from a specific address. Used by the */
61/* above. The client should call GC_gcollect() immediately */
62/* before invocation. */
63void GC_print_backtrace(void *);
64
65#endif /* GC_BACKPTR_H */
diff --git a/gc/include/gc_config_macros.h b/gc/include/gc_config_macros.h
deleted file mode 100644
index 763e02fb3aa..00000000000
--- a/gc/include/gc_config_macros.h
+++ /dev/null
@@ -1,140 +0,0 @@
1/*
2 * This should never be included directly. It is included only from gc.h.
3 * We separate it only to make gc.h more suitable as documentation.
4 *
5 * Some tests for old macros. These violate our namespace rules and will
6 * disappear shortly. Use the GC_ names.
7 */
8#if defined(SOLARIS_THREADS) || defined(_SOLARIS_THREADS)
9# define GC_SOLARIS_THREADS
10#endif
11#if defined(_SOLARIS_PTHREADS)
12# define GC_SOLARIS_PTHREADS
13#endif
14#if defined(IRIX_THREADS)
15# define GC_IRIX_THREADS
16#endif
17#if defined(DGUX_THREADS)
18# if !defined(GC_DGUX386_THREADS)
19# define GC_DGUX386_THREADS
20# endif
21#endif
22#if defined(HPUX_THREADS)
23# define GC_HPUX_THREADS
24#endif
25#if defined(OSF1_THREADS)
26# define GC_OSF1_THREADS
27#endif
28#if defined(LINUX_THREADS)
29# define GC_LINUX_THREADS
30#endif
31#if defined(WIN32_THREADS)
32# define GC_WIN32_THREADS
33#endif
34#if defined(USE_LD_WRAP)
35# define GC_USE_LD_WRAP
36#endif
37
38#if !defined(_REENTRANT) && (defined(GC_SOLARIS_THREADS) \
39 || defined(GC_SOLARIS_PTHREADS) \
40 || defined(GC_HPUX_THREADS) \
41 || defined(GC_LINUX_THREADS))
42# define _REENTRANT
43 /* Better late than never. This fails if system headers that */
44 /* depend on this were previously included. */
45#endif
46
47#if defined(GC_DGUX386_THREADS) && !defined(_POSIX4A_DRAFT10_SOURCE)
48# define _POSIX4A_DRAFT10_SOURCE 1
49#endif
50
51# if defined(GC_SOLARIS_PTHREADS) || defined(GC_FREEBSD_THREADS) || \
52 defined(GC_IRIX_THREADS) || defined(GC_LINUX_THREADS) || \
53 defined(GC_HPUX_THREADS) || defined(GC_OSF1_THREADS) || \
54 defined(GC_DGUX386_THREADS) || defined(GC_MACOSX_THREADS) || \
55 (defined(GC_WIN32_THREADS) && defined(__CYGWIN32__))
56# define GC_PTHREADS
57# endif
58
59#if defined(GC_THREADS) && !defined(GC_PTHREADS)
60# if defined(__linux__)
61# define GC_LINUX_THREADS
62# define GC_PTHREADS
63# endif
64# if !defined(LINUX) && (defined(_PA_RISC1_1) || defined(_PA_RISC2_0) \
65 || defined(hppa) || defined(__HPPA))
66# define GC_HPUX_THREADS
67# define GC_PTHREADS
68# endif
69# if !defined(__linux__) && (defined(__alpha) || defined(__alpha__))
70# define GC_OSF1_THREADS
71# define GC_PTHREADS
72# endif
73# if defined(__mips) && !defined(__linux__)
74# define GC_IRIX_THREADS
75# define GC_PTHREADS
76# endif
77# if defined(__sparc) && !defined(__linux__)
78# define GC_SOLARIS_PTHREADS
79# define GC_PTHREADS
80# endif
81# if defined(__APPLE__) && defined(__MACH__) && defined(__ppc__)
82# define GC_MACOSX_THREADS
83# define GC_PTHREADS
84# endif
85# if !defined(GC_PTHREADS) && defined(__FreeBSD__)
86# define GC_FREEBSD_THREADS
87# define GC_PTHREADS
88# endif
89# if defined(DGUX) && (defined(i386) || defined(__i386__))
90# define GC_DGUX386_THREADS
91# define GC_PTHREADS
92# endif
93#endif /* GC_THREADS */
94
95#if defined(GC_THREADS) && !defined(GC_PTHREADS) && defined(MSWIN32)
96# define GC_WIN32_THREADS
97#endif
98
99#if defined(GC_SOLARIS_PTHREADS) && !defined(GC_SOLARIS_THREADS)
100# define GC_SOLARIS_THREADS
101#endif
102
103# define __GC
104# include <stddef.h>
105# ifdef _WIN32_WCE
106/* Yet more kluges for WinCE */
107# include <stdlib.h> /* size_t is defined here */
108 typedef long ptrdiff_t; /* ptrdiff_t is not defined */
109# endif
110
111#if defined(__MINGW32__) && defined(_DLL) && !defined(GC_NOT_DLL)
112# ifdef GC_BUILD
113# define GC_API __declspec(dllexport)
114# else
115# define GC_API __declspec(dllimport)
116# endif
117#endif
118
119#if (defined(__DMC__) || defined(_MSC_VER)) \
120 && (defined(_DLL) && !defined(GC_NOT_DLL) \
121 || defined(GC_DLL))
122# ifdef GC_BUILD
123# define GC_API extern __declspec(dllexport)
124# else
125# define GC_API __declspec(dllimport)
126# endif
127#endif
128
129#if defined(__WATCOMC__) && defined(GC_DLL)
130# ifdef GC_BUILD
131# define GC_API extern __declspec(dllexport)
132# else
133# define GC_API extern __declspec(dllimport)
134# endif
135#endif
136
137#ifndef GC_API
138#define GC_API extern
139#endif
140
diff --git a/gc/include/gc_cpp.h b/gc/include/gc_cpp.h
deleted file mode 100644
index d789a3731e3..00000000000
--- a/gc/include/gc_cpp.h
+++ /dev/null
@@ -1,362 +0,0 @@
1#ifndef GC_CPP_H
2#define GC_CPP_H
3/****************************************************************************
4Copyright (c) 1994 by Xerox Corporation. All rights reserved.
5
6THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
7OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
8
9Permission is hereby granted to use or copy this program for any
10purpose, provided the above notices are retained on all copies.
11Permission to modify the code and to distribute modified code is
12granted, provided the above notices are retained, and a notice that
13the code was modified is included with the above copyright notice.
14****************************************************************************
15
16C++ Interface to the Boehm Collector
17
18 John R. Ellis and Jesse Hull
19
20This interface provides access to the Boehm collector. It provides
21basic facilities similar to those described in "Safe, Efficient
22Garbage Collection for C++", by John R. Ellis and David L. Detlefs
23(ftp://ftp.parc.xerox.com/pub/ellis/gc).
24
25All heap-allocated objects are either "collectable" or
26"uncollectable". Programs must explicitly delete uncollectable
27objects, whereas the garbage collector will automatically delete
28collectable objects when it discovers them to be inaccessible.
29Collectable objects may freely point at uncollectable objects and vice
30versa.
31
32Objects allocated with the built-in "::operator new" are uncollectable.
33
34Objects derived from class "gc" are collectable. For example:
35
36 class A: public gc {...};
37 A* a = new A; // a is collectable.
38
39Collectable instances of non-class types can be allocated using the GC
40(or UseGC) placement:
41
42 typedef int A[ 10 ];
43 A* a = new (GC) A;
44
45Uncollectable instances of classes derived from "gc" can be allocated
46using the NoGC placement:
47
48 class A: public gc {...};
49 A* a = new (NoGC) A; // a is uncollectable.
50
51Both uncollectable and collectable objects can be explicitly deleted
52with "delete", which invokes an object's destructors and frees its
53storage immediately.
54
55A collectable object may have a clean-up function, which will be
56invoked when the collector discovers the object to be inaccessible.
57An object derived from "gc_cleanup" or containing a member derived
58from "gc_cleanup" has a default clean-up function that invokes the
59object's destructors. Explicit clean-up functions may be specified as
60an additional placement argument:
61
62 A* a = ::new (GC, MyCleanup) A;
63
64An object is considered "accessible" by the collector if it can be
65reached by a path of pointers from static variables, automatic
66variables of active functions, or from some object with clean-up
67enabled; pointers from an object to itself are ignored.
68
69Thus, if objects A and B both have clean-up functions, and A points at
70B, B is considered accessible. After A's clean-up is invoked and its
71storage released, B will then become inaccessible and will have its
72clean-up invoked. If A points at B and B points to A, forming a
73cycle, then that's considered a storage leak, and neither will be
74collectable. See the interface gc.h for low-level facilities for
75handling such cycles of objects with clean-up.
76
77The collector cannot guarantee that it will find all inaccessible
78objects. In practice, it finds almost all of them.
79
80
81Cautions:
82
831. Be sure the collector has been augmented with "make c++".
84
852. If your compiler supports the new "operator new[]" syntax, then
86add -DGC_OPERATOR_NEW_ARRAY to the Makefile.
87
88If your compiler doesn't support "operator new[]", beware that an
89array of type T, where T is derived from "gc", may or may not be
90allocated as a collectable object (it depends on the compiler). Use
91the explicit GC placement to make the array collectable. For example:
92
93 class A: public gc {...};
94 A* a1 = new A[ 10 ]; // collectable or uncollectable?
95 A* a2 = new (GC) A[ 10 ]; // collectable
96
973. The destructors of collectable arrays of objects derived from
98"gc_cleanup" will not be invoked properly. For example:
99
100 class A: public gc_cleanup {...};
101 A* a = new (GC) A[ 10 ]; // destructors not invoked correctly
102
103Typically, only the destructor for the first element of the array will
104be invoked when the array is garbage-collected. To get all the
105destructors of any array executed, you must supply an explicit
106clean-up function:
107
108 A* a = new (GC, MyCleanUp) A[ 10 ];
109
110(Implementing clean-up of arrays correctly, portably, and in a way
111that preserves the correct exception semantics requires a language
112extension, e.g. the "gc" keyword.)
113
1144. Compiler bugs:
115
116* Solaris 2's CC (SC3.0) doesn't implement t->~T() correctly, so the
117destructors of classes derived from gc_cleanup won't be invoked.
118You'll have to explicitly register a clean-up function with
119new-placement syntax.
120
121* Evidently cfront 3.0 does not allow destructors to be explicitly
122invoked using the ANSI-conforming syntax t->~T(). If you're using
123cfront 3.0, you'll have to comment out the class gc_cleanup, which
124uses explicit invocation.
125
1265. GC name conflicts:
127
128Many other systems seem to use the identifier "GC" as an abbreviation
129for "Graphics Context". Since version 5.0, GC placement has been replaced
130by UseGC. GC is an alias for UseGC, unless GC_NAME_CONFLICT is defined.
131
132****************************************************************************/
133
134#include "gc.h"
135
136#ifndef THINK_CPLUS
137# define GC_cdecl
138#else
139# define GC_cdecl _cdecl
140#endif
141
142#if ! defined( GC_NO_OPERATOR_NEW_ARRAY ) \
143 && !defined(_ENABLE_ARRAYNEW) /* Digimars */ \
144 && (defined(__BORLANDC__) && (__BORLANDC__ < 0x450) \
145 || (defined(__GNUC__) && \
146 (__GNUC__ < 2 || __GNUC__ == 2 && __GNUC_MINOR__ < 6)) \
147 || (defined(__WATCOMC__) && __WATCOMC__ < 1050))
148# define GC_NO_OPERATOR_NEW_ARRAY
149#endif
150
151#if !defined(GC_NO_OPERATOR_NEW_ARRAY) && !defined(GC_OPERATOR_NEW_ARRAY)
152# define GC_OPERATOR_NEW_ARRAY
153#endif
154
155enum GCPlacement {UseGC,
156#ifndef GC_NAME_CONFLICT
157 GC=UseGC,
158#endif
159 NoGC, PointerFreeGC};
160
161class gc {public:
162 inline void* operator new( size_t size );
163 inline void* operator new( size_t size, GCPlacement gcp );
164 inline void* operator new( size_t size, void *p );
165 /* Must be redefined here, since the other overloadings */
166 /* hide the global definition. */
167 inline void operator delete( void* obj );
168# ifndef __BORLANDC__ /* Confuses the Borland compiler. */
169 inline void operator delete( void*, void* );
170# endif
171
172#ifdef GC_OPERATOR_NEW_ARRAY
173 inline void* operator new[]( size_t size );
174 inline void* operator new[]( size_t size, GCPlacement gcp );
175 inline void* operator new[]( size_t size, void *p );
176 inline void operator delete[]( void* obj );
177# ifndef __BORLANDC__
178 inline void gc::operator delete[]( void*, void* );
179# endif
180#endif /* GC_OPERATOR_NEW_ARRAY */
181 };
182 /*
183 Instances of classes derived from "gc" will be allocated in the
184 collected heap by default, unless an explicit NoGC placement is
185 specified. */
186
187class gc_cleanup: virtual public gc {public:
188 inline gc_cleanup();
189 inline virtual ~gc_cleanup();
190private:
191 inline static void GC_cdecl cleanup( void* obj, void* clientData );};
192 /*
193 Instances of classes derived from "gc_cleanup" will be allocated
194 in the collected heap by default. When the collector discovers an
195 inaccessible object derived from "gc_cleanup" or containing a
196 member derived from "gc_cleanup", its destructors will be
197 invoked. */
198
199extern "C" {typedef void (*GCCleanUpFunc)( void* obj, void* clientData );}
200
201#ifdef _MSC_VER
202 // Disable warning that "no matching operator delete found; memory will
203 // not be freed if initialization throws an exception"
204# pragma warning(disable:4291)
205#endif
206
207inline void* operator new(
208 size_t size,
209 GCPlacement gcp,
210 GCCleanUpFunc cleanup = 0,
211 void* clientData = 0 );
212 /*
213 Allocates a collectable or uncollected object, according to the
214 value of "gcp".
215
216 For collectable objects, if "cleanup" is non-null, then when the
217 allocated object "obj" becomes inaccessible, the collector will
218 invoke the function "cleanup( obj, clientData )" but will not
219 invoke the object's destructors. It is an error to explicitly
220 delete an object allocated with a non-null "cleanup".
221
222 It is an error to specify a non-null "cleanup" with NoGC or for
223 classes derived from "gc_cleanup" or containing members derived
224 from "gc_cleanup". */
225
226
227#ifdef _MSC_VER
228 /** This ensures that the system default operator new[] doesn't get
229 * undefined, which is what seems to happen on VC++ 6 for some reason
230 * if we define a multi-argument operator new[].
231 * There seems to be no way to redirect new in this environment without
232 * including this everywhere.
233 */
234 void *operator new[]( size_t size );
235
236 void operator delete[](void* obj);
237
238 void* operator new( size_t size);
239
240 void operator delete(void* obj);
241
242 // This new operator is used by VC++ in case of Debug builds !
243 void* operator new( size_t size,
244 int ,//nBlockUse,
245 const char * szFileName,
246 int nLine );
247#endif /* _MSC_VER */
248
249
250#ifdef GC_OPERATOR_NEW_ARRAY
251
252inline void* operator new[](
253 size_t size,
254 GCPlacement gcp,
255 GCCleanUpFunc cleanup = 0,
256 void* clientData = 0 );
257 /*
258 The operator new for arrays, identical to the above. */
259
260#endif /* GC_OPERATOR_NEW_ARRAY */
261
262/****************************************************************************
263
264Inline implementation
265
266****************************************************************************/
267
268inline void* gc::operator new( size_t size ) {
269 return GC_MALLOC( size );}
270
271inline void* gc::operator new( size_t size, GCPlacement gcp ) {
272 if (gcp == UseGC)
273 return GC_MALLOC( size );
274 else if (gcp == PointerFreeGC)
275 return GC_MALLOC_ATOMIC( size );
276 else
277 return GC_MALLOC_UNCOLLECTABLE( size );}
278
279inline void* gc::operator new( size_t size, void *p ) {
280 return p;}
281
282inline void gc::operator delete( void* obj ) {
283 GC_FREE( obj );}
284
285#ifndef __BORLANDC__
286 inline void gc::operator delete( void*, void* ) {}
287#endif
288
289#ifdef GC_OPERATOR_NEW_ARRAY
290
291inline void* gc::operator new[]( size_t size ) {
292 return gc::operator new( size );}
293
294inline void* gc::operator new[]( size_t size, GCPlacement gcp ) {
295 return gc::operator new( size, gcp );}
296
297inline void* gc::operator new[]( size_t size, void *p ) {
298 return p;}
299
300inline void gc::operator delete[]( void* obj ) {
301 gc::operator delete( obj );}
302
303#ifndef __BORLANDC__
304 inline void gc::operator delete[]( void*, void* ) {}
305#endif
306
307#endif /* GC_OPERATOR_NEW_ARRAY */
308
309
310inline gc_cleanup::~gc_cleanup() {
311 GC_register_finalizer_ignore_self( GC_base(this), 0, 0, 0, 0 );}
312
313inline void gc_cleanup::cleanup( void* obj, void* displ ) {
314 ((gc_cleanup*) ((char*) obj + (ptrdiff_t) displ))->~gc_cleanup();}
315
316inline gc_cleanup::gc_cleanup() {
317 GC_finalization_proc oldProc;
318 void* oldData;
319 void* base = GC_base( (void *) this );
320 if (0 != base) {
321 // Don't call the debug version, since this is a real base address.
322 GC_register_finalizer_ignore_self(
323 base, (GC_finalization_proc)cleanup, (void*) ((char*) this - (char*) base),
324 &oldProc, &oldData );
325 if (0 != oldProc) {
326 GC_register_finalizer_ignore_self( base, oldProc, oldData, 0, 0 );}}}
327
328inline void* operator new(
329 size_t size,
330 GCPlacement gcp,
331 GCCleanUpFunc cleanup,
332 void* clientData )
333{
334 void* obj;
335
336 if (gcp == UseGC) {
337 obj = GC_MALLOC( size );
338 if (cleanup != 0)
339 GC_REGISTER_FINALIZER_IGNORE_SELF(
340 obj, cleanup, clientData, 0, 0 );}
341 else if (gcp == PointerFreeGC) {
342 obj = GC_MALLOC_ATOMIC( size );}
343 else {
344 obj = GC_MALLOC_UNCOLLECTABLE( size );};
345 return obj;}
346
347
348#ifdef GC_OPERATOR_NEW_ARRAY
349
350inline void* operator new[](
351 size_t size,
352 GCPlacement gcp,
353 GCCleanUpFunc cleanup,
354 void* clientData )
355{
356 return ::operator new( size, gcp, cleanup, clientData );}
357
358#endif /* GC_OPERATOR_NEW_ARRAY */
359
360
361#endif /* GC_CPP_H */
362
diff --git a/gc/include/gc_gcj.h b/gc/include/gc_gcj.h
deleted file mode 100644
index 5e932afe8d0..00000000000
--- a/gc/include/gc_gcj.h
+++ /dev/null
@@ -1,102 +0,0 @@
1/*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
4 * Copyright 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright 1999 by Hewlett-Packard Company. All rights reserved.
6 *
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 *
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
15 */
16
17/* This file assumes the collector has been compiled with GC_GCJ_SUPPORT */
18/* and that an ANSI C compiler is available. */
19
20/*
21 * We allocate objects whose first word contains a pointer to a struct
22 * describing the object type. This struct contains a garbage collector mark
23 * descriptor at offset MARK_DESCR_OFFSET. Alternatively, the objects
24 * may be marked by the mark procedure passed to GC_init_gcj_malloc.
25 */
26
27#ifndef GC_GCJ_H
28
29#define GC_GCJ_H
30
31#ifndef MARK_DESCR_OFFSET
32# define MARK_DESCR_OFFSET sizeof(word)
33#endif
34 /* Gcj keeps GC descriptor as second word of vtable. This */
35 /* probably needs to be adjusted for other clients. */
36 /* We currently assume that this offset is such that: */
37 /* - all objects of this kind are large enough to have */
38 /* a value at that offset, and */
39 /* - it is not zero. */
40 /* These assumptions allow objects on the free list to be */
41 /* marked normally. */
42
43#ifndef _GC_H
44# include "gc.h"
45#endif
46
47/* The following allocators signal an out of memory condition with */
48/* return GC_oom_fn(bytes); */
49
50/* The following function must be called before the gcj allocators */
51/* can be invoked. */
52/* mp_index and mp are the index and mark_proc (see gc_mark.h) */
53/* respectively for the allocated objects. Mark_proc will be */
54/* used to build the descriptor for objects allocated through the */
55/* debugging interface. The mark_proc will be invoked on all such */
56/* objects with an "environment" value of 1. The client may choose */
57/* to use the same mark_proc for some of its generated mark descriptors.*/
58/* In that case, it should use a different "environment" value to */
59/* detect the presence or absence of the debug header. */
60/* Mp is really of type mark_proc, as defined in gc_mark.h. We don't */
61/* want to include that here for namespace pollution reasons. */
62extern void GC_init_gcj_malloc(int mp_index, void * /* really mark_proc */mp);
63
64/* Allocate an object, clear it, and store the pointer to the */
65/* type structure (vtable in gcj). */
66/* This adds a byte at the end of the object if GC_malloc would.*/
67extern void * GC_gcj_malloc(size_t lb, void * ptr_to_struct_containing_descr);
68/* The debug versions allocate such that the specified mark_proc */
69/* is always invoked. */
70extern void * GC_debug_gcj_malloc(size_t lb,
71 void * ptr_to_struct_containing_descr,
72 GC_EXTRA_PARAMS);
73
74/* Similar to the above, but the size is in words, and we don't */
75/* adjust it. The size is assumed to be such that it can be */
76/* allocated as a small object. */
77/* Unless it is known that the collector is not configured */
78/* with USE_MARK_BYTES and unless it is known that the object */
79/* has weak alignment requirements, lw must be even. */
80extern void * GC_gcj_fast_malloc(size_t lw,
81 void * ptr_to_struct_containing_descr);
82extern void * GC_debug_gcj_fast_malloc(size_t lw,
83 void * ptr_to_struct_containing_descr,
84 GC_EXTRA_PARAMS);
85
86/* Similar to GC_gcj_malloc, but assumes that a pointer to near the */
87/* beginning of the resulting object is always maintained. */
88extern void * GC_gcj_malloc_ignore_off_page(size_t lb,
89 void * ptr_to_struct_containing_descr);
90
91# ifdef GC_DEBUG
92# define GC_GCJ_MALLOC(s,d) GC_debug_gcj_malloc(s,d,GC_EXTRAS)
93# define GC_GCJ_FAST_MALLOC(s,d) GC_debug_gcj_fast_malloc(s,d,GC_EXTRAS)
94# define GC_GCJ_MALLOC_IGNORE_OFF_PAGE(s,d) GC_debug_gcj_malloc(s,d,GC_EXTRAS)
95# else
96# define GC_GCJ_MALLOC(s,d) GC_gcj_malloc(s,d)
97# define GC_GCJ_FAST_MALLOC(s,d) GC_gcj_fast_malloc(s,d)
98# define GC_GCJ_MALLOC_IGNORE_OFF_PAGE(s,d) \
99 GC_gcj_malloc_ignore_off_page(s,d)
100# endif
101
102#endif /* GC_GCJ_H */
diff --git a/gc/include/gc_inl.h b/gc/include/gc_inl.h
deleted file mode 100644
index c535cfd73fc..00000000000
--- a/gc/include/gc_inl.h
+++ /dev/null
@@ -1,107 +0,0 @@
1/*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
4 *
5 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
6 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
7 *
8 * Permission is hereby granted to use or copy this program
9 * for any purpose, provided the above notices are retained on all copies.
10 * Permission to modify the code and to distribute modified code is granted,
11 * provided the above notices are retained, and a notice that the code was
12 * modified is included with the above copyright notice.
13 */
14/* Boehm, October 3, 1995 2:07 pm PDT */
15
16# ifndef GC_PRIVATE_H
17# include "private/gc_priv.h"
18# endif
19
20/* USE OF THIS FILE IS NOT RECOMMENDED unless GC_all_interior_pointers */
21/* is always set, or the collector has been built with */
22/* -DDONT_ADD_BYTE_AT_END, or the specified size includes a pointerfree */
23/* word at the end. In the standard collector configuration, */
24/* the final word of each object may not be scanned. */
25/* This interface is most useful for compilers that generate C. */
26/* Manual use is hereby discouraged. */
27
28/* Allocate n words (NOT BYTES). X is made to point to the result. */
29/* It is assumed that n < MAXOBJSZ, and */
30/* that n > 0. On machines requiring double word alignment of some */
31/* data, we also assume that n is 1 or even. */
32/* If the collector is built with -DUSE_MARK_BYTES or -DPARALLEL_MARK, */
33/* the n = 1 case is also disallowed. */
34/* Effectively this means that portable code should make sure n is even.*/
35/* This bypasses the */
36/* MERGE_SIZES mechanism. In order to minimize the number of distinct */
37/* free lists that are maintained, the caller should ensure that a */
38/* small number of distinct values of n are used. (The MERGE_SIZES */
39/* mechanism normally does this by ensuring that only the leading three */
40/* bits of n may be nonzero. See misc.c for details.) We really */
41/* recommend this only in cases in which n is a constant, and no */
42/* locking is required. */
43/* In that case it may allow the compiler to perform substantial */
44/* additional optimizations. */
45# define GC_MALLOC_WORDS(result,n) \
46{ \
47 register ptr_t op; \
48 register ptr_t *opp; \
49 DCL_LOCK_STATE; \
50 \
51 opp = &(GC_objfreelist[n]); \
52 FASTLOCK(); \
53 if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) { \
54 FASTUNLOCK(); \
55 (result) = GC_generic_malloc_words_small((n), NORMAL); \
56 } else { \
57 *opp = obj_link(op); \
58 obj_link(op) = 0; \
59 GC_words_allocd += (n); \
60 FASTUNLOCK(); \
61 (result) = (GC_PTR) op; \
62 } \
63}
64
65
66/* The same for atomic objects: */
67# define GC_MALLOC_ATOMIC_WORDS(result,n) \
68{ \
69 register ptr_t op; \
70 register ptr_t *opp; \
71 DCL_LOCK_STATE; \
72 \
73 opp = &(GC_aobjfreelist[n]); \
74 FASTLOCK(); \
75 if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) { \
76 FASTUNLOCK(); \
77 (result) = GC_generic_malloc_words_small((n), PTRFREE); \
78 } else { \
79 *opp = obj_link(op); \
80 obj_link(op) = 0; \
81 GC_words_allocd += (n); \
82 FASTUNLOCK(); \
83 (result) = (GC_PTR) op; \
84 } \
85}
86
87/* And once more for two word initialized objects: */
88# define GC_CONS(result, first, second) \
89{ \
90 register ptr_t op; \
91 register ptr_t *opp; \
92 DCL_LOCK_STATE; \
93 \
94 opp = &(GC_objfreelist[2]); \
95 FASTLOCK(); \
96 if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) { \
97 FASTUNLOCK(); \
98 op = GC_generic_malloc_words_small(2, NORMAL); \
99 } else { \
100 *opp = obj_link(op); \
101 GC_words_allocd += 2; \
102 FASTUNLOCK(); \
103 } \
104 ((word *)op)[0] = (word)(first); \
105 ((word *)op)[1] = (word)(second); \
106 (result) = (GC_PTR) op; \
107}
diff --git a/gc/include/gc_inline.h b/gc/include/gc_inline.h
deleted file mode 100644
index db62d1d58a8..00000000000
--- a/gc/include/gc_inline.h
+++ /dev/null
@@ -1 +0,0 @@
1# include "gc_inl.h"
diff --git a/gc/include/gc_local_alloc.h b/gc/include/gc_local_alloc.h
deleted file mode 100644
index 88e29e9a11f..00000000000
--- a/gc/include/gc_local_alloc.h
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * Copyright (c) 2000 by Hewlett-Packard Company. All rights reserved.
3 *
4 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
5 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
6 *
7 * Permission is hereby granted to use or copy this program
8 * for any purpose, provided the above notices are retained on all copies.
9 * Permission to modify the code and to distribute modified code is granted,
10 * provided the above notices are retained, and a notice that the code was
11 * modified is included with the above copyright notice.
12 */
13
14/*
15 * Interface for thread local allocation. Memory obtained
16 * this way can be used by all threads, as though it were obtained
17 * from an allocator like GC_malloc. The difference is that GC_local_malloc
18 * counts the number of allocations of a given size from the current thread,
 19 * and uses GC_malloc_many to perform the allocations once a threshold
20 * is exceeded. Thus far less synchronization may be needed.
21 * Allocation of known large objects should not use this interface.
22 * This interface is designed primarily for fast allocation of small
23 * objects on multiprocessors, e.g. for a JVM running on an MP server.
24 *
25 * If this file is included with GC_GCJ_SUPPORT defined, GCJ-style
26 * bitmap allocation primitives will also be included.
27 *
28 * If this file is included with GC_REDIRECT_TO_LOCAL defined, then
29 * GC_MALLOC, GC_MALLOC_ATOMIC, and possibly GC_GCJ_MALLOC will
 30 * be redefined to use the thread local allocator.
31 *
32 * The interface is available only if the collector is built with
33 * -DTHREAD_LOCAL_ALLOC, which is currently supported only on Linux.
34 *
35 * The debugging allocators use standard, not thread-local allocation.
36 *
37 * These routines normally require an explicit call to GC_init(), though
38 * that may be done from a constructor function.
39 */
40
41#ifndef GC_LOCAL_ALLOC_H
42#define GC_LOCAL_ALLOC_H
43
44#ifndef _GC_H
45# include "gc.h"
46#endif
47
48#if defined(GC_GCJ_SUPPORT) && !defined(GC_GCJ_H)
49# include "gc_gcj.h"
50#endif
51
52/* We assume ANSI C for this interface. */
53
54GC_PTR GC_local_malloc(size_t bytes);
55
56GC_PTR GC_local_malloc_atomic(size_t bytes);
57
58#if defined(GC_GCJ_SUPPORT)
59 GC_PTR GC_local_gcj_malloc(size_t bytes,
60 void * ptr_to_struct_containing_descr);
61#endif
62
63# ifdef GC_DEBUG
64# define GC_LOCAL_MALLOC(s) GC_debug_malloc(s,GC_EXTRAS)
65# define GC_LOCAL_MALLOC_ATOMIC(s) GC_debug_malloc_atomic(s,GC_EXTRAS)
66# ifdef GC_GCJ_SUPPORT
67# define GC_LOCAL_GCJ_MALLOC(s,d) GC_debug_gcj_malloc(s,d,GC_EXTRAS)
68# endif
69# else
70# define GC_LOCAL_MALLOC(s) GC_local_malloc(s)
71# define GC_LOCAL_MALLOC_ATOMIC(s) GC_local_malloc_atomic(s)
72# ifdef GC_GCJ_SUPPORT
73# define GC_LOCAL_GCJ_MALLOC(s,d) GC_local_gcj_malloc(s,d)
74# endif
75# endif
76
77# ifdef GC_REDIRECT_TO_LOCAL
78# undef GC_MALLOC
79# define GC_MALLOC(s) GC_LOCAL_MALLOC(s)
80# undef GC_MALLOC_ATOMIC
81# define GC_MALLOC_ATOMIC(s) GC_LOCAL_MALLOC_ATOMIC(s)
82# ifdef GC_GCJ_SUPPORT
83# undef GC_GCJ_MALLOC
84# define GC_GCJ_MALLOC(s,d) GC_LOCAL_GCJ_MALLOC(s,d)
85# endif
86# endif
87
88#endif /* GC_LOCAL_ALLOC_H */
diff --git a/gc/include/gc_mark.h b/gc/include/gc_mark.h
deleted file mode 100644
index 9ddba2ca527..00000000000
--- a/gc/include/gc_mark.h
+++ /dev/null
@@ -1,147 +0,0 @@
1/*
2 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
3 * Copyright (c) 2001 by Hewlett-Packard Company. All rights reserved.
4 *
5 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
6 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
7 *
8 * Permission is hereby granted to use or copy this program
9 * for any purpose, provided the above notices are retained on all copies.
10 * Permission to modify the code and to distribute modified code is granted,
11 * provided the above notices are retained, and a notice that the code was
12 * modified is included with the above copyright notice.
13 *
14 */
15
16/*
17 * This contains interfaces to the GC marker that are likely to be useful to
18 * clients that provide detailed heap layout information to the collector.
19 * This interface should not be used by normal C or C++ clients.
20 * It will be useful to runtimes for other languages.
21 *
22 * Note that this file is not "namespace-clean", i.e. it introduces names
23 * not prefixed with GC_, which may collide with the client's names. It
24 * should be included only in those few places that directly provide
25 * information to the collector.
26 */
27#ifndef GC_MARK_H
28# define GC_MARK_H
29
30# ifndef GC_H
31# include "gc.h"
32# endif
33
34/* A client supplied mark procedure. Returns new mark stack pointer. */
35/* Primary effect should be to push new entries on the mark stack. */
36/* Mark stack pointer values are passed and returned explicitly. */
 37/* Global variables describing mark stack are not necessarily valid. */
38/* (This usually saves a few cycles by keeping things in registers.) */
39/* Assumed to scan about GC_PROC_BYTES on average. If it needs to do */
40/* much more work than that, it should do it in smaller pieces by */
41/* pushing itself back on the mark stack. */
42/* Note that it should always do some work (defined as marking some */
43/* objects) before pushing more than one entry on the mark stack. */
44/* This is required to ensure termination in the event of mark stack */
45/* overflows. */
46/* This procedure is always called with at least one empty entry on the */
47/* mark stack. */
48/* Currently we require that mark procedures look for pointers in a */
49/* subset of the places the conservative marker would. It must be safe */
50/* to invoke the normal mark procedure instead. */
51/* WARNING: Such a mark procedure may be invoked on an unused object */
52/* residing on a free list. Such objects are cleared, except for a */
53/* free list link field in the first word. Thus mark procedures may */
54/* not count on the presence of a type descriptor, and must handle this */
55/* case correctly somehow. */
56# define GC_PROC_BYTES 100
57struct GC_ms_entry;
58typedef struct GC_ms_entry * (*GC_mark_proc) GC_PROTO((
59 GC_word * addr, struct GC_ms_entry * mark_stack_ptr,
60 struct GC_ms_entry * mark_stack_limit, GC_word env));
61
62# define GC_LOG_MAX_MARK_PROCS 6
63# define GC_MAX_MARK_PROCS (1 << GC_LOG_MAX_MARK_PROCS)
64
65/* In a few cases it's necessary to assign statically known indices to */
66/* certain mark procs. Thus we reserve a few for well known clients. */
67/* (This is necessary if mark descriptors are compiler generated.) */
68#define GC_RESERVED_MARK_PROCS 8
69# define GC_GCJ_RESERVED_MARK_PROC_INDEX 0
70
71/* Object descriptors on mark stack or in objects. Low order two */
72/* bits are tags distinguishing among the following 4 possibilities */
73/* for the high order 30 bits. */
74#define GC_DS_TAG_BITS 2
75#define GC_DS_TAGS ((1 << GC_DS_TAG_BITS) - 1)
76#define GC_DS_LENGTH 0 /* The entire word is a length in bytes that */
77 /* must be a multiple of 4. */
78#define GC_DS_BITMAP 1 /* 30 (62) bits are a bitmap describing pointer */
79 /* fields. The msb is 1 iff the first word */
80 /* is a pointer. */
81 /* (This unconventional ordering sometimes */
82 /* makes the marker slightly faster.) */
83 /* Zeroes indicate definite nonpointers. Ones */
84 /* indicate possible pointers. */
85 /* Only usable if pointers are word aligned. */
86#define GC_DS_PROC 2
87 /* The objects referenced by this object can be */
88 /* pushed on the mark stack by invoking */
89 /* PROC(descr). ENV(descr) is passed as the */
90 /* last argument. */
91# define GC_MAKE_PROC(proc_index, env) \
92 (((((env) << GC_LOG_MAX_MARK_PROCS) \
93 | (proc_index)) << GC_DS_TAG_BITS) | GC_DS_PROC)
94#define GC_DS_PER_OBJECT 3 /* The real descriptor is at the */
95 /* byte displacement from the beginning of the */
96 /* object given by descr & ~DS_TAGS */
97 /* If the descriptor is negative, the real */
98 /* descriptor is at (*<object_start>) - */
99 /* (descr & ~DS_TAGS) - GC_INDIR_PER_OBJ_BIAS */
100 /* The latter alternative can be used if each */
101 /* object contains a type descriptor in the */
102 /* first word. */
103 /* Note that in multithreaded environments */
 104 /* per object descriptors must be located in */
105 /* either the first two or last two words of */
106 /* the object, since only those are guaranteed */
107 /* to be cleared while the allocation lock is */
108 /* held. */
109#define GC_INDIR_PER_OBJ_BIAS 0x10
110
111extern GC_PTR GC_least_plausible_heap_addr;
112extern GC_PTR GC_greatest_plausible_heap_addr;
113 /* Bounds on the heap. Guaranteed valid */
114 /* Likely to include future heap expansion. */
115
116/* Handle nested references in a custom mark procedure. */
117/* Check if obj is a valid object. If so, ensure that it is marked. */
118/* If it was not previously marked, push its contents onto the mark */
119/* stack for future scanning. The object will then be scanned using */
120/* its mark descriptor. */
121/* Returns the new mark stack pointer. */
122/* Handles mark stack overflows correctly. */
123/* Since this marks first, it makes progress even if there are mark */
124/* stack overflows. */
125/* Src is the address of the pointer to obj, which is used only */
126/* for back pointer-based heap debugging. */
127/* It is strongly recommended that most objects be handled without mark */
128/* procedures, e.g. with bitmap descriptors, and that mark procedures */
129/* be reserved for exceptional cases. That will ensure that */
130/* performance of this call is not extremely performance critical. */
131/* (Otherwise we would need to inline GC_mark_and_push completely, */
132/* which would tie the client code to a fixed collector version.) */
133/* Note that mark procedures should explicitly call FIXUP_POINTER() */
134/* if required. */
135struct GC_ms_entry *GC_mark_and_push
136 GC_PROTO((GC_PTR obj,
137 struct GC_ms_entry * mark_stack_ptr,
138 struct GC_ms_entry * mark_stack_limit, GC_PTR *src));
139
140#define GC_MARK_AND_PUSH(obj, msp, lim, src) \
141 (((GC_word)obj >= (GC_word)GC_least_plausible_heap_addr && \
142 (GC_word)obj <= (GC_word)GC_greatest_plausible_heap_addr)? \
143 GC_mark_and_push(obj, msp, lim, src) : \
144 msp)
145
146#endif /* GC_MARK_H */
147
diff --git a/gc/include/gc_pthread_redirects.h b/gc/include/gc_pthread_redirects.h
deleted file mode 100644
index 47284fbc97a..00000000000
--- a/gc/include/gc_pthread_redirects.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/* Our pthread support normally needs to intercept a number of thread */
2/* calls. We arrange to do that here, if appropriate. */
3
4#ifndef GC_PTHREAD_REDIRECTS_H
5
6#define GC_PTHREAD_REDIRECTS_H
7
8#if defined(GC_SOLARIS_THREADS)
9/* We need to intercept calls to many of the threads primitives, so */
10/* that we can locate thread stacks and stop the world. */
11/* Note also that the collector cannot see thread specific data. */
12/* Thread specific data should generally consist of pointers to */
13/* uncollectable objects (allocated with GC_malloc_uncollectable, */
14/* not the system malloc), which are deallocated using the destructor */
15/* facility in thr_keycreate. Alternatively, keep a redundant pointer */
16/* to thread specific data on the thread stack. */
17# include <thread.h>
18 int GC_thr_create(void *stack_base, size_t stack_size,
19 void *(*start_routine)(void *), void *arg, long flags,
20 thread_t *new_thread);
21 int GC_thr_join(thread_t wait_for, thread_t *departed, void **status);
22 int GC_thr_suspend(thread_t target_thread);
23 int GC_thr_continue(thread_t target_thread);
24 void * GC_dlopen(const char *path, int mode);
25# define thr_create GC_thr_create
26# define thr_join GC_thr_join
27# define thr_suspend GC_thr_suspend
28# define thr_continue GC_thr_continue
29#endif /* GC_SOLARIS_THREADS */
30
31#if defined(GC_SOLARIS_PTHREADS)
32# include <pthread.h>
33# include <signal.h>
34 extern int GC_pthread_create(pthread_t *new_thread,
35 const pthread_attr_t *attr,
36 void * (*thread_execp)(void *), void *arg);
37 extern int GC_pthread_join(pthread_t wait_for, void **status);
38# define pthread_join GC_pthread_join
39# define pthread_create GC_pthread_create
40#endif
41
42#if defined(GC_SOLARIS_PTHREADS) || defined(GC_SOLARIS_THREADS)
43# define dlopen GC_dlopen
44#endif /* SOLARIS_THREADS || SOLARIS_PTHREADS */
45
46
47#if !defined(GC_USE_LD_WRAP) && defined(GC_PTHREADS) && !defined(GC_SOLARIS_PTHREADS)
48/* We treat these similarly. */
49# include <pthread.h>
50# include <signal.h>
51
52 int GC_pthread_create(pthread_t *new_thread,
53 const pthread_attr_t *attr,
54 void *(*start_routine)(void *), void *arg);
55 int GC_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset);
56 int GC_pthread_join(pthread_t thread, void **retval);
57 int GC_pthread_detach(pthread_t thread);
58
59# define pthread_create GC_pthread_create
60# define pthread_sigmask GC_pthread_sigmask
61# define pthread_join GC_pthread_join
62# define pthread_detach GC_pthread_detach
63# define dlopen GC_dlopen
64
65#endif /* GC_xxxxx_THREADS */
66
67#endif /* GC_PTHREAD_REDIRECTS_H */
diff --git a/gc/include/gc_typed.h b/gc/include/gc_typed.h
deleted file mode 100644
index bdbb82bcfd5..00000000000
--- a/gc/include/gc_typed.h
+++ /dev/null
@@ -1,113 +0,0 @@
1/*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 * Copyright 1996 Silicon Graphics. All rights reserved.
5 *
6 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
7 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
8 *
9 * Permission is hereby granted to use or copy this program
10 * for any purpose, provided the above notices are retained on all copies.
11 * Permission to modify the code and to distribute modified code is granted,
12 * provided the above notices are retained, and a notice that the code was
13 * modified is included with the above copyright notice.
14 */
15/*
16 * Some simple primitives for allocation with explicit type information.
17 * Facilities for dynamic type inference may be added later.
18 * Should be used only for extremely performance critical applications,
19 * or if conservative collector leakage is otherwise a problem (unlikely).
20 * Note that this is implemented completely separately from the rest
21 * of the collector, and is not linked in unless referenced.
22 * This does not currently support GC_DEBUG in any interesting way.
23 */
24/* Boehm, May 19, 1994 2:13 pm PDT */
25
26#ifndef _GC_TYPED_H
27# define _GC_TYPED_H
28# ifndef _GC_H
29# include "gc.h"
30# endif
31
32#ifdef __cplusplus
33 extern "C" {
34#endif
35typedef GC_word * GC_bitmap;
36 /* The least significant bit of the first word is one if */
37 /* the first word in the object may be a pointer. */
38
39# define GC_WORDSZ (8*sizeof(GC_word))
40# define GC_get_bit(bm, index) \
41 (((bm)[index/GC_WORDSZ] >> (index%GC_WORDSZ)) & 1)
42# define GC_set_bit(bm, index) \
43 (bm)[index/GC_WORDSZ] |= ((GC_word)1 << (index%GC_WORDSZ))
44# define GC_WORD_OFFSET(t, f) (offsetof(t,f)/sizeof(GC_word))
45# define GC_WORD_LEN(t) (sizeof(t)/ sizeof(GC_word))
46# define GC_BITMAP_SIZE(t) ((GC_WORD_LEN(t) + GC_WORDSZ-1)/GC_WORDSZ)
47
48typedef GC_word GC_descr;
49
50GC_API GC_descr GC_make_descriptor GC_PROTO((GC_bitmap bm, size_t len));
51 /* Return a type descriptor for the object whose layout */
52 /* is described by the argument. */
53 /* The least significant bit of the first word is one */
54 /* if the first word in the object may be a pointer. */
55 /* The second argument specifies the number of */
56 /* meaningful bits in the bitmap. The actual object */
57 /* may be larger (but not smaller). Any additional */
58 /* words in the object are assumed not to contain */
59 /* pointers. */
60 /* Returns a conservative approximation in the */
61 /* (unlikely) case of insufficient memory to build */
62 /* the descriptor. Calls to GC_make_descriptor */
63 /* may consume some amount of a finite resource. This */
64 /* is intended to be called once per type, not once */
65 /* per allocation. */
66
67/* It is possible to generate a descriptor for a C type T with */
68/* word aligned pointer fields f1, f2, ... as follows: */
69/* */
70/* GC_descr T_descr;
71/* GC_word T_bitmap[GC_BITMAP_SIZE(T)] = {0}; */
72/* GC_set_bit(T_bitmap, GC_WORD_OFFSET(T,f1)); */
73/* GC_set_bit(T_bitmap, GC_WORD_OFFSET(T,f2)); */
74/* ... */
75/* T_descr = GC_make_descriptor(T_bitmap, GC_WORD_LEN(T)); */
76
77GC_API GC_PTR GC_malloc_explicitly_typed
78 GC_PROTO((size_t size_in_bytes, GC_descr d));
79 /* Allocate an object whose layout is described by d. */
80 /* The resulting object MAY NOT BE PASSED TO REALLOC. */
81 /* The returned object is cleared. */
82
83GC_API GC_PTR GC_malloc_explicitly_typed_ignore_off_page
84 GC_PROTO((size_t size_in_bytes, GC_descr d));
85
86GC_API GC_PTR GC_calloc_explicitly_typed
87 GC_PROTO((size_t nelements,
88 size_t element_size_in_bytes,
89 GC_descr d));
90 /* Allocate an array of nelements elements, each of the */
91 /* given size, and with the given descriptor. */
 92 /* The element size must be a multiple of the byte */
93 /* alignment required for pointers. E.g. on a 32-bit */
94 /* machine with 16-bit aligned pointers, size_in_bytes */
95 /* must be a multiple of 2. */
96 /* Returned object is cleared. */
97
98#ifdef GC_DEBUG
99# define GC_MALLOC_EXPLICITLY_TYPED(bytes, d) GC_MALLOC(bytes)
100# define GC_CALLOC_EXPLICITLY_TYPED(n, bytes, d) GC_MALLOC(n*bytes)
101#else
102# define GC_MALLOC_EXPLICITLY_TYPED(bytes, d) \
103 GC_malloc_explicitly_typed(bytes, d)
104# define GC_CALLOC_EXPLICITLY_TYPED(n, bytes, d) \
105 GC_calloc_explicitly_typed(n, bytes, d)
106#endif /* !GC_DEBUG */
107
108#ifdef __cplusplus
109 } /* matches extern "C" */
110#endif
111
112#endif /* _GC_TYPED_H */
113
diff --git a/gc/include/javaxfc.h b/gc/include/javaxfc.h
deleted file mode 100644
index 880020c5363..00000000000
--- a/gc/include/javaxfc.h
+++ /dev/null
@@ -1,41 +0,0 @@
1# ifndef GC_H
2# include "gc.h"
3# endif
4
5/*
6 * Invoke all remaining finalizers that haven't yet been run.
7 * This is needed for strict compliance with the Java standard,
8 * which can make the runtime guarantee that all finalizers are run.
9 * This is problematic for several reasons:
 10 * 1) It means that finalizers, and all methods called by them,
11 * must be prepared to deal with objects that have been finalized in
12 * spite of the fact that they are still referenced by statically
13 * allocated pointer variables.
 14 * 2) It may mean that we get stuck in an infinite loop running
15 * finalizers which create new finalizable objects, though that's
16 * probably unlikely.
17 * Thus this is not recommended for general use.
18 */
19void GC_finalize_all();
20
21/*
22 * A version of GC_register_finalizer that allows the object to be
23 * finalized before the objects it references. This is again error
24 * prone, in that it makes it easy to accidentally reference finalized
25 * objects. Again, recommended only for JVM implementors.
26 */
27void GC_register_finalizer_no_order(GC_PTR obj,
28 GC_finalization_proc fn, GC_PTR cd,
29 GC_finalization_proc *ofn, GC_PTR * ocd);
30
31void GC_debug_register_finalizer_no_order(GC_PTR obj,
32 GC_finalization_proc fn, GC_PTR cd,
33 GC_finalization_proc *ofn, GC_PTR * ocd);
34
35#ifdef GC_DEBUG
36# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
37 GC_debug_register_finalizer_no_order(p, f, d, of, od)
38#else
39# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
40 GC_register_finalizer_no_order(p, f, d, of, od)
41#endif
diff --git a/gc/include/leak_detector.h b/gc/include/leak_detector.h
deleted file mode 100644
index 0674ab4d09f..00000000000
--- a/gc/include/leak_detector.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#define GC_DEBUG
2#include "gc.h"
3#define malloc(n) GC_MALLOC(n)
4#define calloc(m,n) GC_MALLOC((m)*(n))
5#define free(p) GC_FREE(p)
6#define realloc(p,n) GC_REALLOC((p),(n))
7#define CHECK_LEAKS() GC_gcollect()
diff --git a/gc/include/new_gc_alloc.h b/gc/include/new_gc_alloc.h
deleted file mode 100644
index 20a2fabf83c..00000000000
--- a/gc/include/new_gc_alloc.h
+++ /dev/null
@@ -1,480 +0,0 @@
1/*
2 * Copyright (c) 1996-1998 by Silicon Graphics. All rights reserved.
3 *
4 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
5 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
6 *
7 * Permission is hereby granted to use or copy this program
8 * for any purpose, provided the above notices are retained on all copies.
9 * Permission to modify the code and to distribute modified code is granted,
10 * provided the above notices are retained, and a notice that the code was
11 * modified is included with the above copyright notice.
12 */
13
14//
15// This is a revision of gc_alloc.h for SGI STL versions > 3.0
16// Unlike earlier versions, it supplements the standard "alloc.h"
17// instead of replacing it.
18//
19// This is sloppy about variable names used in header files.
20// It also doesn't yet understand the new header file names or
21// namespaces.
22//
23// This assumes the collector has been compiled with -DATOMIC_UNCOLLECTABLE.
24// The user should also consider -DREDIRECT_MALLOC=GC_uncollectable_malloc,
25// to ensure that object allocated through malloc are traced.
26//
27// Some of this could be faster in the explicit deallocation case.
28// In particular, we spend too much time clearing objects on the
29// free lists. That could be avoided.
30//
31// This uses template classes with static members, and hence does not work
32// with g++ 2.7.2 and earlier.
33//
34// Unlike its predecessor, this one simply defines
35// gc_alloc
36// single_client_gc_alloc
37// traceable_alloc
38// single_client_traceable_alloc
39//
40// It does not redefine alloc. Nor does it change the default allocator,
41// though the user may wish to do so. (The argument against changing
42// the default allocator is that it may introduce subtle link compatibility
43// problems. The argument for changing it is that the usual default
44// allocator is usually a very bad choice for a garbage collected environment.)
45//
46// This code assumes that the collector itself has been compiled with a
47// compiler that defines __STDC__ .
48//
49
50#ifndef GC_ALLOC_H
51
52#include "gc.h"
53
54#if (__GNUC__ < 3)
55# include <stack> // A more portable way to get stl_alloc.h .
56#else
57# include <bits/stl_alloc.h>
58# ifndef __STL_BEGIN_NAMESPACE
59# define __STL_BEGIN_NAMESPACE namespace std {
60# define __STL_END_NAMESPACE };
61# endif
62#ifndef __STL_USE_STD_ALLOCATORS
63#define __STL_USE_STD_ALLOCATORS
64#endif
65#endif
66
67/* A hack to deal with gcc 3.1. If you are using gcc3.1 and later, */
68/* you should probably really use gc_allocator.h instead. */
69#if defined (__GNUC__) && \
70 (__GNUC > 3 || (__GNUC__ == 3 && (__GNUC_MINOR__ >= 1)))
71# define simple_alloc __simple_alloc
72#endif
73
74
75
76#define GC_ALLOC_H
77
78#include <stddef.h>
79#include <string.h>
80
81// The following need to match collector data structures.
82// We can't include gc_priv.h, since that pulls in way too much stuff.
83// This should eventually be factored out into another include file.
84
85extern "C" {
86 extern void ** const GC_objfreelist_ptr;
87 extern void ** const GC_aobjfreelist_ptr;
88 extern void ** const GC_uobjfreelist_ptr;
89 extern void ** const GC_auobjfreelist_ptr;
90
91 extern void GC_incr_words_allocd(size_t words);
92 extern void GC_incr_mem_freed(size_t words);
93
94 extern char * GC_generic_malloc_words_small(size_t word, int kind);
95}
96
97// Object kinds; must match PTRFREE, NORMAL, UNCOLLECTABLE, and
98// AUNCOLLECTABLE in gc_priv.h.
99
100enum { GC_PTRFREE = 0, GC_NORMAL = 1, GC_UNCOLLECTABLE = 2,
101 GC_AUNCOLLECTABLE = 3 };
102
103enum { GC_max_fast_bytes = 255 };
104
105enum { GC_bytes_per_word = sizeof(char *) };
106
107enum { GC_byte_alignment = 8 };
108
109enum { GC_word_alignment = GC_byte_alignment/GC_bytes_per_word };
110
111inline void * &GC_obj_link(void * p)
112{ return *(void **)p; }
113
114// Compute a number of words >= n+1 bytes.
115// The +1 allows for pointers one past the end.
116inline size_t GC_round_up(size_t n)
117{
118 return ((n + GC_byte_alignment)/GC_byte_alignment)*GC_word_alignment;
119}
120
121// The same but don't allow for extra byte.
122inline size_t GC_round_up_uncollectable(size_t n)
123{
124 return ((n + GC_byte_alignment - 1)/GC_byte_alignment)*GC_word_alignment;
125}
126
127template <int dummy>
128class GC_aux_template {
129public:
130 // File local count of allocated words. Occasionally this is
131 // added into the global count. A separate count is necessary since the
132 // real one must be updated with a procedure call.
133 static size_t GC_words_recently_allocd;
134
 135 // Same for uncollectable memory. Not yet reflected in either
136 // GC_words_recently_allocd or GC_non_gc_bytes.
137 static size_t GC_uncollectable_words_recently_allocd;
138
139 // Similar counter for explicitly deallocated memory.
140 static size_t GC_mem_recently_freed;
141
142 // Again for uncollectable memory.
143 static size_t GC_uncollectable_mem_recently_freed;
144
145 static void * GC_out_of_line_malloc(size_t nwords, int kind);
146};
147
148template <int dummy>
149size_t GC_aux_template<dummy>::GC_words_recently_allocd = 0;
150
151template <int dummy>
152size_t GC_aux_template<dummy>::GC_uncollectable_words_recently_allocd = 0;
153
154template <int dummy>
155size_t GC_aux_template<dummy>::GC_mem_recently_freed = 0;
156
157template <int dummy>
158size_t GC_aux_template<dummy>::GC_uncollectable_mem_recently_freed = 0;
159
160template <int dummy>
161void * GC_aux_template<dummy>::GC_out_of_line_malloc(size_t nwords, int kind)
162{
163 GC_words_recently_allocd += GC_uncollectable_words_recently_allocd;
164 GC_non_gc_bytes +=
165 GC_bytes_per_word * GC_uncollectable_words_recently_allocd;
166 GC_uncollectable_words_recently_allocd = 0;
167
168 GC_mem_recently_freed += GC_uncollectable_mem_recently_freed;
169 GC_non_gc_bytes -=
170 GC_bytes_per_word * GC_uncollectable_mem_recently_freed;
171 GC_uncollectable_mem_recently_freed = 0;
172
173 GC_incr_words_allocd(GC_words_recently_allocd);
174 GC_words_recently_allocd = 0;
175
176 GC_incr_mem_freed(GC_mem_recently_freed);
177 GC_mem_recently_freed = 0;
178
179 return GC_generic_malloc_words_small(nwords, kind);
180}
181
182typedef GC_aux_template<0> GC_aux;
183
184// A fast, single-threaded, garbage-collected allocator
185// We assume the first word will be immediately overwritten.
186// In this version, deallocation is not a noop, and explicit
187// deallocation is likely to help performance.
188template <int dummy>
189class single_client_gc_alloc_template {
190 public:
191 static void * allocate(size_t n)
192 {
193 size_t nwords = GC_round_up(n);
194 void ** flh;
195 void * op;
196
197 if (n > GC_max_fast_bytes) return GC_malloc(n);
198 flh = GC_objfreelist_ptr + nwords;
199 if (0 == (op = *flh)) {
200 return GC_aux::GC_out_of_line_malloc(nwords, GC_NORMAL);
201 }
202 *flh = GC_obj_link(op);
203 GC_aux::GC_words_recently_allocd += nwords;
204 return op;
205 }
206 static void * ptr_free_allocate(size_t n)
207 {
208 size_t nwords = GC_round_up(n);
209 void ** flh;
210 void * op;
211
212 if (n > GC_max_fast_bytes) return GC_malloc_atomic(n);
213 flh = GC_aobjfreelist_ptr + nwords;
214 if (0 == (op = *flh)) {
215 return GC_aux::GC_out_of_line_malloc(nwords, GC_PTRFREE);
216 }
217 *flh = GC_obj_link(op);
218 GC_aux::GC_words_recently_allocd += nwords;
219 return op;
220 }
221 static void deallocate(void *p, size_t n)
222 {
223 size_t nwords = GC_round_up(n);
224 void ** flh;
225
226 if (n > GC_max_fast_bytes) {
227 GC_free(p);
228 } else {
229 flh = GC_objfreelist_ptr + nwords;
230 GC_obj_link(p) = *flh;
231 memset((char *)p + GC_bytes_per_word, 0,
232 GC_bytes_per_word * (nwords - 1));
233 *flh = p;
234 GC_aux::GC_mem_recently_freed += nwords;
235 }
236 }
237 static void ptr_free_deallocate(void *p, size_t n)
238 {
239 size_t nwords = GC_round_up(n);
240 void ** flh;
241
242 if (n > GC_max_fast_bytes) {
243 GC_free(p);
244 } else {
245 flh = GC_aobjfreelist_ptr + nwords;
246 GC_obj_link(p) = *flh;
247 *flh = p;
248 GC_aux::GC_mem_recently_freed += nwords;
249 }
250 }
251};
252
253typedef single_client_gc_alloc_template<0> single_client_gc_alloc;
254
255// Once more, for uncollectable objects.
256template <int dummy>
257class single_client_traceable_alloc_template {
258 public:
259 static void * allocate(size_t n)
260 {
261 size_t nwords = GC_round_up_uncollectable(n);
262 void ** flh;
263 void * op;
264
265 if (n > GC_max_fast_bytes) return GC_malloc_uncollectable(n);
266 flh = GC_uobjfreelist_ptr + nwords;
267 if (0 == (op = *flh)) {
268 return GC_aux::GC_out_of_line_malloc(nwords, GC_UNCOLLECTABLE);
269 }
270 *flh = GC_obj_link(op);
271 GC_aux::GC_uncollectable_words_recently_allocd += nwords;
272 return op;
273 }
274 static void * ptr_free_allocate(size_t n)
275 {
276 size_t nwords = GC_round_up_uncollectable(n);
277 void ** flh;
278 void * op;
279
280 if (n > GC_max_fast_bytes) return GC_malloc_atomic_uncollectable(n);
281 flh = GC_auobjfreelist_ptr + nwords;
282 if (0 == (op = *flh)) {
283 return GC_aux::GC_out_of_line_malloc(nwords, GC_AUNCOLLECTABLE);
284 }
285 *flh = GC_obj_link(op);
286 GC_aux::GC_uncollectable_words_recently_allocd += nwords;
287 return op;
288 }
289 static void deallocate(void *p, size_t n)
290 {
291 size_t nwords = GC_round_up_uncollectable(n);
292 void ** flh;
293
294 if (n > GC_max_fast_bytes) {
295 GC_free(p);
296 } else {
297 flh = GC_uobjfreelist_ptr + nwords;
298 GC_obj_link(p) = *flh;
299 *flh = p;
300 GC_aux::GC_uncollectable_mem_recently_freed += nwords;
301 }
302 }
303 static void ptr_free_deallocate(void *p, size_t n)
304 {
305 size_t nwords = GC_round_up_uncollectable(n);
306 void ** flh;
307
308 if (n > GC_max_fast_bytes) {
309 GC_free(p);
310 } else {
311 flh = GC_auobjfreelist_ptr + nwords;
312 GC_obj_link(p) = *flh;
313 *flh = p;
314 GC_aux::GC_uncollectable_mem_recently_freed += nwords;
315 }
316 }
317};
318
319typedef single_client_traceable_alloc_template<0> single_client_traceable_alloc;
320
321template < int dummy >
322class gc_alloc_template {
323 public:
324 static void * allocate(size_t n) { return GC_malloc(n); }
325 static void * ptr_free_allocate(size_t n)
326 { return GC_malloc_atomic(n); }
327 static void deallocate(void *, size_t) { }
328 static void ptr_free_deallocate(void *, size_t) { }
329};
330
331typedef gc_alloc_template < 0 > gc_alloc;
332
333template < int dummy >
334class traceable_alloc_template {
335 public:
336 static void * allocate(size_t n) { return GC_malloc_uncollectable(n); }
337 static void * ptr_free_allocate(size_t n)
338 { return GC_malloc_atomic_uncollectable(n); }
339 static void deallocate(void *p, size_t) { GC_free(p); }
340 static void ptr_free_deallocate(void *p, size_t) { GC_free(p); }
341};
342
343typedef traceable_alloc_template < 0 > traceable_alloc;
344
345// We want to specialize simple_alloc so that it does the right thing
346// for all pointerfree types. At the moment there is no portable way to
347// even approximate that. The following approximation should work for
348// SGI compilers, and recent versions of g++.
349
350# define __GC_SPECIALIZE(T,alloc) \
351class simple_alloc<T, alloc> { \
352public: \
353 static T *allocate(size_t n) \
354 { return 0 == n? 0 : \
355 (T*) alloc::ptr_free_allocate(n * sizeof (T)); } \
356 static T *allocate(void) \
357 { return (T*) alloc::ptr_free_allocate(sizeof (T)); } \
358 static void deallocate(T *p, size_t n) \
359 { if (0 != n) alloc::ptr_free_deallocate(p, n * sizeof (T)); } \
360 static void deallocate(T *p) \
361 { alloc::ptr_free_deallocate(p, sizeof (T)); } \
362};
363
364__STL_BEGIN_NAMESPACE
365
366__GC_SPECIALIZE(char, gc_alloc)
367__GC_SPECIALIZE(int, gc_alloc)
368__GC_SPECIALIZE(unsigned, gc_alloc)
369__GC_SPECIALIZE(float, gc_alloc)
370__GC_SPECIALIZE(double, gc_alloc)
371
372__GC_SPECIALIZE(char, traceable_alloc)
373__GC_SPECIALIZE(int, traceable_alloc)
374__GC_SPECIALIZE(unsigned, traceable_alloc)
375__GC_SPECIALIZE(float, traceable_alloc)
376__GC_SPECIALIZE(double, traceable_alloc)
377
378__GC_SPECIALIZE(char, single_client_gc_alloc)
379__GC_SPECIALIZE(int, single_client_gc_alloc)
380__GC_SPECIALIZE(unsigned, single_client_gc_alloc)
381__GC_SPECIALIZE(float, single_client_gc_alloc)
382__GC_SPECIALIZE(double, single_client_gc_alloc)
383
384__GC_SPECIALIZE(char, single_client_traceable_alloc)
385__GC_SPECIALIZE(int, single_client_traceable_alloc)
386__GC_SPECIALIZE(unsigned, single_client_traceable_alloc)
387__GC_SPECIALIZE(float, single_client_traceable_alloc)
388__GC_SPECIALIZE(double, single_client_traceable_alloc)
389
390__STL_END_NAMESPACE
391
392#ifdef __STL_USE_STD_ALLOCATORS
393
394__STL_BEGIN_NAMESPACE
395
396template <class _T>
397struct _Alloc_traits<_T, gc_alloc >
398{
399 static const bool _S_instanceless = true;
400 typedef simple_alloc<_T, gc_alloc > _Alloc_type;
401 typedef __allocator<_T, gc_alloc > allocator_type;
402};
403
404inline bool operator==(const gc_alloc&,
405 const gc_alloc&)
406{
407 return true;
408}
409
410inline bool operator!=(const gc_alloc&,
411 const gc_alloc&)
412{
413 return false;
414}
415
416template <class _T>
417struct _Alloc_traits<_T, single_client_gc_alloc >
418{
419 static const bool _S_instanceless = true;
420 typedef simple_alloc<_T, single_client_gc_alloc > _Alloc_type;
421 typedef __allocator<_T, single_client_gc_alloc > allocator_type;
422};
423
424inline bool operator==(const single_client_gc_alloc&,
425 const single_client_gc_alloc&)
426{
427 return true;
428}
429
430inline bool operator!=(const single_client_gc_alloc&,
431 const single_client_gc_alloc&)
432{
433 return false;
434}
435
436template <class _T>
437struct _Alloc_traits<_T, traceable_alloc >
438{
439 static const bool _S_instanceless = true;
440 typedef simple_alloc<_T, traceable_alloc > _Alloc_type;
441 typedef __allocator<_T, traceable_alloc > allocator_type;
442};
443
444inline bool operator==(const traceable_alloc&,
445 const traceable_alloc&)
446{
447 return true;
448}
449
450inline bool operator!=(const traceable_alloc&,
451 const traceable_alloc&)
452{
453 return false;
454}
455
456template <class _T>
457struct _Alloc_traits<_T, single_client_traceable_alloc >
458{
459 static const bool _S_instanceless = true;
460 typedef simple_alloc<_T, single_client_traceable_alloc > _Alloc_type;
461 typedef __allocator<_T, single_client_traceable_alloc > allocator_type;
462};
463
464inline bool operator==(const single_client_traceable_alloc&,
465 const single_client_traceable_alloc&)
466{
467 return true;
468}
469
470inline bool operator!=(const single_client_traceable_alloc&,
471 const single_client_traceable_alloc&)
472{
473 return false;
474}
475
476__STL_END_NAMESPACE
477
478#endif /* __STL_USE_STD_ALLOCATORS */
479
480#endif /* GC_ALLOC_H */
diff --git a/gc/include/private/cord_pos.h b/gc/include/private/cord_pos.h
deleted file mode 100644
index d2b24bb8ab6..00000000000
--- a/gc/include/private/cord_pos.h
+++ /dev/null
@@ -1,118 +0,0 @@
1/*
2 * Copyright (c) 1993-1994 by Xerox Corporation. All rights reserved.
3 *
4 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
5 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
6 *
7 * Permission is hereby granted to use or copy this program
8 * for any purpose, provided the above notices are retained on all copies.
9 * Permission to modify the code and to distribute modified code is granted,
10 * provided the above notices are retained, and a notice that the code was
11 * modified is included with the above copyright notice.
12 */
13/* Boehm, May 19, 1994 2:23 pm PDT */
14# ifndef CORD_POSITION_H
15
16/* The representation of CORD_position. This is private to the */
17/* implementation, but the size is known to clients. Also */
18/* the implementation of some exported macros relies on it. */
19/* Don't use anything defined here and not in cord.h. */
20
21# define MAX_DEPTH 48
22 /* The maximum depth of a balanced cord + 1. */
23 /* We don't let cords get deeper than MAX_DEPTH. */
24
25struct CORD_pe {
26 CORD pe_cord;
27 size_t pe_start_pos;
28};
29
30/* A structure describing an entry on the path from the root */
31/* to current position. */
32typedef struct CORD_Pos {
33 size_t cur_pos;
34 int path_len;
35# define CORD_POS_INVALID (0x55555555)
36 /* path_len == INVALID <==> position invalid */
37 const char *cur_leaf; /* Current leaf, if it is a string. */
38 /* If the current leaf is a function, */
39 /* then this may point to function_buf */
40 /* containing the next few characters. */
41 /* Always points to a valid string */
42 /* containing the current character */
43 /* unless cur_end is 0. */
44 size_t cur_start; /* Start position of cur_leaf */
45 size_t cur_end; /* Ending position of cur_leaf */
46 /* 0 if cur_leaf is invalid. */
47 struct CORD_pe path[MAX_DEPTH + 1];
48 /* path[path_len] is the leaf corresponding to cur_pos */
49 /* path[0].pe_cord is the cord we point to. */
50# define FUNCTION_BUF_SZ 8
51 char function_buf[FUNCTION_BUF_SZ]; /* Space for next few chars */
52 /* from function node. */
53} CORD_pos[1];
54
55/* Extract the cord from a position: */
56CORD CORD_pos_to_cord(CORD_pos p);
57
58/* Extract the current index from a position: */
59size_t CORD_pos_to_index(CORD_pos p);
60
61/* Fetch the character located at the given position: */
62char CORD_pos_fetch(CORD_pos p);
63
64/* Initialize the position to refer to the give cord and index. */
65/* Note that this is the most expensive function on positions: */
66void CORD_set_pos(CORD_pos p, CORD x, size_t i);
67
68/* Advance the position to the next character. */
69/* P must be initialized and valid. */
70/* Invalidates p if past end: */
71void CORD_next(CORD_pos p);
72
73/* Move the position to the preceding character. */
74/* P must be initialized and valid. */
75/* Invalidates p if past beginning: */
76void CORD_prev(CORD_pos p);
77
78/* Is the position valid, i.e. inside the cord? */
79int CORD_pos_valid(CORD_pos p);
80
81char CORD__pos_fetch(CORD_pos);
82void CORD__next(CORD_pos);
83void CORD__prev(CORD_pos);
84
85#define CORD_pos_fetch(p) \
86 (((p)[0].cur_end != 0)? \
87 (p)[0].cur_leaf[(p)[0].cur_pos - (p)[0].cur_start] \
88 : CORD__pos_fetch(p))
89
90#define CORD_next(p) \
91 (((p)[0].cur_pos + 1 < (p)[0].cur_end)? \
92 (p)[0].cur_pos++ \
93 : (CORD__next(p), 0))
94
95#define CORD_prev(p) \
96 (((p)[0].cur_end != 0 && (p)[0].cur_pos > (p)[0].cur_start)? \
97 (p)[0].cur_pos-- \
98 : (CORD__prev(p), 0))
99
100#define CORD_pos_to_index(p) ((p)[0].cur_pos)
101
102#define CORD_pos_to_cord(p) ((p)[0].path[0].pe_cord)
103
104#define CORD_pos_valid(p) ((p)[0].path_len != CORD_POS_INVALID)
105
106/* Some grubby stuff for performance-critical friends: */
107#define CORD_pos_chars_left(p) ((long)((p)[0].cur_end) - (long)((p)[0].cur_pos))
108 /* Number of characters in cache. <= 0 ==> none */
109
110#define CORD_pos_advance(p,n) ((p)[0].cur_pos += (n) - 1, CORD_next(p))
111 /* Advance position by n characters */
112 /* 0 < n < CORD_pos_chars_left(p) */
113
114#define CORD_pos_cur_char_addr(p) \
115 (p)[0].cur_leaf + ((p)[0].cur_pos - (p)[0].cur_start)
116 /* address of current character in cache. */
117
118#endif
diff --git a/gc/include/private/dbg_mlc.h b/gc/include/private/dbg_mlc.h
deleted file mode 100644
index e2003e6c44f..00000000000
--- a/gc/include/private/dbg_mlc.h
+++ /dev/null
@@ -1,176 +0,0 @@
1/*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1997 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
6 *
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 *
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
15 */
16
17/*
18 * This is mostly an internal header file. Typical clients should
19 * not use it. Clients that define their own object kinds with
20 * debugging allocators will probably want to include this, however.
21 * No attempt is made to keep the namespace clean. This should not be
22 * included from header files that are frequently included by clients.
23 */
24
25#ifndef _DBG_MLC_H
26
27#define _DBG_MLC_H
28
29# define I_HIDE_POINTERS
30# include "gc_priv.h"
31# ifdef KEEP_BACK_PTRS
32# include "gc_backptr.h"
33# endif
34
35#ifndef HIDE_POINTER
36 /* Gc.h was previously included, and hence the I_HIDE_POINTERS */
37 /* definition had no effect. Repeat the gc.h definitions here to */
38 /* get them anyway. */
39 typedef GC_word GC_hidden_pointer;
40# define HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
41# define REVEAL_POINTER(p) ((GC_PTR)(HIDE_POINTER(p)))
42#endif /* HIDE_POINTER */
43
44# define START_FLAG ((word)0xfedcedcb)
45# define END_FLAG ((word)0xbcdecdef)
46 /* Stored both one past the end of user object, and one before */
47 /* the end of the object as seen by the allocator. */
48
49# if defined(KEEP_BACK_PTRS) || defined(PRINT_BLACK_LIST) \
50 || defined(MAKE_BACK_GRAPH)
51 /* Pointer "source"s that aren't real locations. */
52 /* Used in oh_back_ptr fields and as "source" */
53 /* argument to some marking functions. */
54# define NOT_MARKED (ptr_t)(0)
55# define MARKED_FOR_FINALIZATION (ptr_t)(2)
56 /* Object was marked because it is finalizable. */
57# define MARKED_FROM_REGISTER (ptr_t)(4)
58 /* Object was marked from a rgister. Hence the */
59 /* source of the reference doesn't have an address. */
60# endif /* KEEP_BACK_PTRS || PRINT_BLACK_LIST */
61
62/* Object header */
63typedef struct {
64# if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
65 /* We potentially keep two different kinds of back */
66 /* pointers. KEEP_BACK_PTRS stores a single back */
67 /* pointer in each reachable object to allow reporting */
68 /* of why an object was retained. MAKE_BACK_GRAPH */
69 /* builds a graph containing the inverse of all */
70 /* "points-to" edges including those involving */
71 /* objects that have just become unreachable. This */
72 /* allows detection of growing chains of unreachable */
73 /* objects. It may be possible to eventually combine */
74 /* both, but for now we keep them separate. Both */
75 /* kinds of back pointers are hidden using the */
76 /* following macros. In both cases, the plain version */
77 /* is constrained to have an least significant bit of 1,*/
78 /* to allow it to be distinguished from a free list */
79 /* link. This means the plain version must have an */
80 /* lsb of 0. */
81 /* Note that blocks dropped by black-listing will */
82 /* also have the lsb clear once debugging has */
83 /* started. */
84 /* We're careful never to overwrite a value with lsb 0. */
85# if ALIGNMENT == 1
86 /* Fudge back pointer to be even. */
87# define HIDE_BACK_PTR(p) HIDE_POINTER(~1 & (GC_word)(p))
88# else
89# define HIDE_BACK_PTR(p) HIDE_POINTER(p)
90# endif
91
92# ifdef KEEP_BACK_PTRS
93 GC_hidden_pointer oh_back_ptr;
94# endif
95# ifdef MAKE_BACK_GRAPH
96 GC_hidden_pointer oh_bg_ptr;
97# endif
98# if defined(ALIGN_DOUBLE) && \
99 (defined(KEEP_BACK_PTRS) != defined(MAKE_BACK_GRAPH))
100 word oh_dummy;
101# endif
102# endif
103 GC_CONST char * oh_string; /* object descriptor string */
104 word oh_int; /* object descriptor integers */
105# ifdef NEED_CALLINFO
106 struct callinfo oh_ci[NFRAMES];
107# endif
108# ifndef SHORT_DBG_HDRS
109 word oh_sz; /* Original malloc arg. */
110 word oh_sf; /* start flag */
111# endif /* SHORT_DBG_HDRS */
112} oh;
113/* The size of the above structure is assumed not to dealign things, */
114/* and to be a multiple of the word length. */
115
116#ifdef SHORT_DBG_HDRS
117# define DEBUG_BYTES (sizeof (oh))
118# define UNCOLLECTABLE_DEBUG_BYTES DEBUG_BYTES
119#else
120 /* Add space for END_FLAG, but use any extra space that was already */
121 /* added to catch off-the-end pointers. */
122 /* For uncollectable objects, the extra byte is not added. */
123# define UNCOLLECTABLE_DEBUG_BYTES (sizeof (oh) + sizeof (word))
124# define DEBUG_BYTES (UNCOLLECTABLE_DEBUG_BYTES - EXTRA_BYTES)
125#endif
126#define USR_PTR_FROM_BASE(p) ((ptr_t)(p) + sizeof(oh))
127
128/* Round bytes to words without adding extra byte at end. */
129#define SIMPLE_ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1) - 1)
130
131/* ADD_CALL_CHAIN stores a (partial) call chain into an object */
132/* header. It may be called with or without the allocation */
133/* lock. */
134/* PRINT_CALL_CHAIN prints the call chain stored in an object */
135/* to stderr. It requires that we do not hold the lock. */
136#ifdef SAVE_CALL_CHAIN
137# define ADD_CALL_CHAIN(base, ra) GC_save_callers(((oh *)(base)) -> oh_ci)
138# define PRINT_CALL_CHAIN(base) GC_print_callers(((oh *)(base)) -> oh_ci)
139#else
140# ifdef GC_ADD_CALLER
141# define ADD_CALL_CHAIN(base, ra) ((oh *)(base)) -> oh_ci[0].ci_pc = (ra)
142# define PRINT_CALL_CHAIN(base) GC_print_callers(((oh *)(base)) -> oh_ci)
143# else
144# define ADD_CALL_CHAIN(base, ra)
145# define PRINT_CALL_CHAIN(base)
146# endif
147#endif
148
149# ifdef GC_ADD_CALLER
150# define OPT_RA ra,
151# else
152# define OPT_RA
153# endif
154
155
156/* Check whether object with base pointer p has debugging info */
157/* p is assumed to point to a legitimate object in our part */
158/* of the heap. */
159#ifdef SHORT_DBG_HDRS
160# define GC_has_other_debug_info(p) TRUE
161#else
162 GC_bool GC_has_other_debug_info(/* p */);
163#endif
164
165#if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
166# define GC_HAS_DEBUG_INFO(p) \
167 ((*((word *)p) & 1) && GC_has_other_debug_info(p))
168#else
169# define GC_HAS_DEBUG_INFO(p) GC_has_other_debug_info(p)
170#endif
171
172/* Store debugging info into p. Return displaced pointer. */
173/* Assumes we don't hold allocation lock. */
174ptr_t GC_store_debug_info(/* p, sz, string, integer */);
175
176#endif /* _DBG_MLC_H */
diff --git a/gc/include/private/gc_hdrs.h b/gc/include/private/gc_hdrs.h
deleted file mode 100644
index 96749ab1bf0..00000000000
--- a/gc/include/private/gc_hdrs.h
+++ /dev/null
@@ -1,233 +0,0 @@
1/*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 *
5 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
6 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
7 *
8 * Permission is hereby granted to use or copy this program
9 * for any purpose, provided the above notices are retained on all copies.
10 * Permission to modify the code and to distribute modified code is granted,
11 * provided the above notices are retained, and a notice that the code was
12 * modified is included with the above copyright notice.
13 */
14/* Boehm, July 11, 1995 11:54 am PDT */
15# ifndef GC_HEADERS_H
16# define GC_HEADERS_H
17typedef struct hblkhdr hdr;
18
19# if CPP_WORDSZ != 32 && CPP_WORDSZ < 36
20 --> Get a real machine.
21# endif
22
23/*
24 * The 2 level tree data structure that is used to find block headers.
25 * If there are more than 32 bits in a pointer, the top level is a hash
26 * table.
27 *
28 * This defines HDR, GET_HDR, and SET_HDR, the main macros used to
29 * retrieve and set object headers.
30 *
31 * Since 5.0 alpha 5, we can also take advantage of a header lookup
32 * cache. This is a locally declared direct mapped cache, used inside
33 * the marker. The HC_GET_HDR macro uses and maintains this
34 * cache. Assuming we get reasonable hit rates, this shaves a few
35 * memory references from each pointer validation.
36 */
37
38# if CPP_WORDSZ > 32
39# define HASH_TL
40# endif
41
42/* Define appropriate out-degrees for each of the two tree levels */
43# ifdef SMALL_CONFIG
44# define LOG_BOTTOM_SZ 11
45 /* Keep top index size reasonable with smaller blocks. */
46# else
47# define LOG_BOTTOM_SZ 10
48# endif
49# ifndef HASH_TL
50# define LOG_TOP_SZ (WORDSZ - LOG_BOTTOM_SZ - LOG_HBLKSIZE)
51# else
52# define LOG_TOP_SZ 11
53# endif
54# define TOP_SZ (1 << LOG_TOP_SZ)
55# define BOTTOM_SZ (1 << LOG_BOTTOM_SZ)
56
57#ifndef SMALL_CONFIG
58# define USE_HDR_CACHE
59#endif
60
61/* #define COUNT_HDR_CACHE_HITS */
62
63extern hdr * GC_invalid_header; /* header for an imaginary block */
64 /* containing no objects. */
65
66
67/* Check whether p and corresponding hhdr point to long or invalid */
68/* object. If so, advance hhdr to */
69/* beginning of block, or set hhdr to GC_invalid_header. */
70#define ADVANCE(p, hhdr, source) \
71 { \
72 hdr * new_hdr = GC_invalid_header; \
73 p = GC_find_start(p, hhdr, &new_hdr); \
74 hhdr = new_hdr; \
75 }
76
77#ifdef USE_HDR_CACHE
78
79# ifdef COUNT_HDR_CACHE_HITS
80 extern word GC_hdr_cache_hits;
81 extern word GC_hdr_cache_misses;
82# define HC_HIT() ++GC_hdr_cache_hits
83# define HC_MISS() ++GC_hdr_cache_misses
84# else
85# define HC_HIT()
86# define HC_MISS()
87# endif
88
89 typedef struct hce {
90 word block_addr; /* right shifted by LOG_HBLKSIZE */
91 hdr * hce_hdr;
92 } hdr_cache_entry;
93
94# define HDR_CACHE_SIZE 8 /* power of 2 */
95
96# define DECLARE_HDR_CACHE \
97 hdr_cache_entry hdr_cache[HDR_CACHE_SIZE]
98
99# define INIT_HDR_CACHE BZERO(hdr_cache, sizeof(hdr_cache));
100
101# define HCE(h) hdr_cache + (((word)(h) >> LOG_HBLKSIZE) & (HDR_CACHE_SIZE-1))
102
103# define HCE_VALID_FOR(hce,h) ((hce) -> block_addr == \
104 ((word)(h) >> LOG_HBLKSIZE))
105
106# define HCE_HDR(h) ((hce) -> hce_hdr)
107
108
109/* Analogous to GET_HDR, except that in the case of large objects, it */
110/* Returns the header for the object beginning, and updates p. */
111/* Returns &GC_bad_header instead of 0. All of this saves a branch */
112/* in the fast path. */
113# define HC_GET_HDR(p, hhdr, source) \
114 { \
115 hdr_cache_entry * hce = HCE(p); \
116 if (HCE_VALID_FOR(hce, p)) { \
117 HC_HIT(); \
118 hhdr = hce -> hce_hdr; \
119 } else { \
120 HC_MISS(); \
121 GET_HDR(p, hhdr); \
122 if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) { \
123 ADVANCE(p, hhdr, source); \
124 } else { \
125 hce -> block_addr = (word)(p) >> LOG_HBLKSIZE; \
126 hce -> hce_hdr = hhdr; \
127 } \
128 } \
129 }
130
131#else /* !USE_HDR_CACHE */
132
133# define DECLARE_HDR_CACHE
134
135# define INIT_HDR_CACHE
136
137# define HC_GET_HDR(p, hhdr, source) \
138 { \
139 GET_HDR(p, hhdr); \
140 if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) { \
141 ADVANCE(p, hhdr, source); \
142 } \
143 }
144#endif
145
146typedef struct bi {
147 hdr * index[BOTTOM_SZ];
148 /*
149 * The bottom level index contains one of three kinds of values:
150 * 0 means we're not responsible for this block,
151 * or this is a block other than the first one in a free block.
152 * 1 < (long)X <= MAX_JUMP means the block starts at least
153 * X * HBLKSIZE bytes before the current address.
154 * A valid pointer points to a hdr structure. (The above can't be
155 * valid pointers due to the GET_MEM return convention.)
156 */
157 struct bi * asc_link; /* All indices are linked in */
158 /* ascending order... */
159 struct bi * desc_link; /* ... and in descending order. */
160 word key; /* high order address bits. */
161# ifdef HASH_TL
162 struct bi * hash_link; /* Hash chain link. */
163# endif
164} bottom_index;
165
166/* extern bottom_index GC_all_nils; - really part of GC_arrays */
167
168/* extern bottom_index * GC_top_index []; - really part of GC_arrays */
169 /* Each entry points to a bottom_index. */
170 /* On a 32 bit machine, it points to */
171 /* the index for a set of high order */
172 /* bits equal to the index. For longer */
173 /* addresses, we hash the high order */
174 /* bits to compute the index in */
175 /* GC_top_index, and each entry points */
176 /* to a hash chain. */
177 /* The last entry in each chain is */
178 /* GC_all_nils. */
179
180
181# define MAX_JUMP (HBLKSIZE - 1)
182
183# define HDR_FROM_BI(bi, p) \
184 ((bi)->index[((word)(p) >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1)])
185# ifndef HASH_TL
186# define BI(p) (GC_top_index \
187 [(word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE)])
188# define HDR_INNER(p) HDR_FROM_BI(BI(p),p)
189# ifdef SMALL_CONFIG
190# define HDR(p) GC_find_header((ptr_t)(p))
191# else
192# define HDR(p) HDR_INNER(p)
193# endif
194# define GET_BI(p, bottom_indx) (bottom_indx) = BI(p)
195# define GET_HDR(p, hhdr) (hhdr) = HDR(p)
196# define SET_HDR(p, hhdr) HDR_INNER(p) = (hhdr)
197# define GET_HDR_ADDR(p, ha) (ha) = &(HDR_INNER(p))
198# else /* hash */
199/* Hash function for tree top level */
200# define TL_HASH(hi) ((hi) & (TOP_SZ - 1))
201/* Set bottom_indx to point to the bottom index for address p */
202# define GET_BI(p, bottom_indx) \
203 { \
204 register word hi = \
205 (word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE); \
206 register bottom_index * _bi = GC_top_index[TL_HASH(hi)]; \
207 \
208 while (_bi -> key != hi && _bi != GC_all_nils) \
209 _bi = _bi -> hash_link; \
210 (bottom_indx) = _bi; \
211 }
212# define GET_HDR_ADDR(p, ha) \
213 { \
214 register bottom_index * bi; \
215 \
216 GET_BI(p, bi); \
217 (ha) = &(HDR_FROM_BI(bi, p)); \
218 }
219# define GET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
220 (hhdr) = *_ha; }
221# define SET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
222 *_ha = (hhdr); }
223# define HDR(p) GC_find_header((ptr_t)(p))
224# endif
225
226/* Is the result a forwarding address to someplace closer to the */
227/* beginning of the block or NIL? */
228# define IS_FORWARDING_ADDR_OR_NIL(hhdr) ((unsigned long) (hhdr) <= MAX_JUMP)
229
230/* Get an HBLKSIZE aligned address closer to the beginning of the block */
231/* h. Assumes hhdr == HDR(h) and IS_FORWARDING_ADDR(hhdr). */
232# define FORWARDED_ADDR(h, hhdr) ((struct hblk *)(h) - (unsigned long)(hhdr))
233# endif /* GC_HEADERS_H */
diff --git a/gc/include/private/gc_locks.h b/gc/include/private/gc_locks.h
deleted file mode 100644
index 35c37162260..00000000000
--- a/gc/include/private/gc_locks.h
+++ /dev/null
@@ -1,581 +0,0 @@
1/*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
6 *
7 *
8 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
9 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 *
11 * Permission is hereby granted to use or copy this program
12 * for any purpose, provided the above notices are retained on all copies.
13 * Permission to modify the code and to distribute modified code is granted,
14 * provided the above notices are retained, and a notice that the code was
15 * modified is included with the above copyright notice.
16 */
17
18#ifndef GC_LOCKS_H
19#define GC_LOCKS_H
20
21/*
22 * Mutual exclusion between allocator/collector routines.
23 * Needed if there is more than one allocator thread.
24 * FASTLOCK() is assumed to try to acquire the lock in a cheap and
25 * dirty way that is acceptable for a few instructions, e.g. by
26 * inhibiting preemption. This is assumed to have succeeded only
27 * if a subsequent call to FASTLOCK_SUCCEEDED() returns TRUE.
28 * FASTUNLOCK() is called whether or not FASTLOCK_SUCCEEDED().
29 * If signals cannot be tolerated with the FASTLOCK held, then
30 * FASTLOCK should disable signals. The code executed under
31 * FASTLOCK is otherwise immune to interruption, provided it is
32 * not restarted.
33 * DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK
34 * and/or DISABLE_SIGNALS and ENABLE_SIGNALS and/or FASTLOCK.
35 * (There is currently no equivalent for FASTLOCK.)
36 *
37 * In the PARALLEL_MARK case, we also need to define a number of
38 * other inline finctions here:
39 * GC_bool GC_compare_and_exchange( volatile GC_word *addr,
40 * GC_word old, GC_word new )
41 * GC_word GC_atomic_add( volatile GC_word *addr, GC_word how_much )
42 * void GC_memory_barrier( )
43 *
44 */
45# ifdef THREADS
46 void GC_noop1 GC_PROTO((word));
47# ifdef PCR_OBSOLETE /* Faster, but broken with multiple lwp's */
48# include "th/PCR_Th.h"
49# include "th/PCR_ThCrSec.h"
50 extern struct PCR_Th_MLRep GC_allocate_ml;
51# define DCL_LOCK_STATE PCR_sigset_t GC_old_sig_mask
52# define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
53# define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
54# define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
55# define FASTLOCK() PCR_ThCrSec_EnterSys()
56 /* Here we cheat (a lot): */
57# define FASTLOCK_SUCCEEDED() (*(int *)(&GC_allocate_ml) == 0)
58 /* TRUE if nobody currently holds the lock */
59# define FASTUNLOCK() PCR_ThCrSec_ExitSys()
60# endif
61# ifdef PCR
62# include <base/PCR_Base.h>
63# include <th/PCR_Th.h>
64 extern PCR_Th_ML GC_allocate_ml;
65# define DCL_LOCK_STATE \
66 PCR_ERes GC_fastLockRes; PCR_sigset_t GC_old_sig_mask
67# define LOCK() PCR_Th_ML_Acquire(&GC_allocate_ml)
68# define UNLOCK() PCR_Th_ML_Release(&GC_allocate_ml)
69# define FASTLOCK() (GC_fastLockRes = PCR_Th_ML_Try(&GC_allocate_ml))
70# define FASTLOCK_SUCCEEDED() (GC_fastLockRes == PCR_ERes_okay)
71# define FASTUNLOCK() {\
72 if( FASTLOCK_SUCCEEDED() ) PCR_Th_ML_Release(&GC_allocate_ml); }
73# endif
74# ifdef SRC_M3
75 extern GC_word RT0u__inCritical;
76# define LOCK() RT0u__inCritical++
77# define UNLOCK() RT0u__inCritical--
78# endif
79# ifdef GC_SOLARIS_THREADS
80# include <thread.h>
81# include <signal.h>
82 extern mutex_t GC_allocate_ml;
83# define LOCK() mutex_lock(&GC_allocate_ml);
84# define UNLOCK() mutex_unlock(&GC_allocate_ml);
85# endif
86
87/* Try to define GC_TEST_AND_SET and a matching GC_CLEAR for spin lock */
88/* acquisition and release. We need this for correct operation of the */
89/* incremental GC. */
90# ifdef __GNUC__
91# if defined(I386)
92 inline static int GC_test_and_set(volatile unsigned int *addr) {
93 int oldval;
94 /* Note: the "xchg" instruction does not need a "lock" prefix */
95 __asm__ __volatile__("xchgl %0, %1"
96 : "=r"(oldval), "=m"(*(addr))
97 : "0"(1), "m"(*(addr)) : "memory");
98 return oldval;
99 }
100# define GC_TEST_AND_SET_DEFINED
101# endif
102# if defined(IA64)
103 inline static int GC_test_and_set(volatile unsigned int *addr) {
104 long oldval, n = 1;
105 __asm__ __volatile__("xchg4 %0=%1,%2"
106 : "=r"(oldval), "=m"(*addr)
107 : "r"(n), "1"(*addr) : "memory");
108 return oldval;
109 }
110# define GC_TEST_AND_SET_DEFINED
111 /* Should this handle post-increment addressing?? */
112 inline static void GC_clear(volatile unsigned int *addr) {
113 __asm__ __volatile__("st4.rel %0=r0" : "=m" (*addr) : : "memory");
114 }
115# define GC_CLEAR_DEFINED
116# endif
117# ifdef SPARC
118 inline static int GC_test_and_set(volatile unsigned int *addr) {
119 int oldval;
120
121 __asm__ __volatile__("ldstub %1,%0"
122 : "=r"(oldval), "=m"(*addr)
123 : "m"(*addr) : "memory");
124 return oldval;
125 }
126# define GC_TEST_AND_SET_DEFINED
127# endif
128# ifdef M68K
129 /* Contributed by Tony Mantler. I'm not sure how well it was */
130 /* tested. */
131 inline static int GC_test_and_set(volatile unsigned int *addr) {
132 char oldval; /* this must be no longer than 8 bits */
133
134 /* The return value is semi-phony. */
135 /* 'tas' sets bit 7 while the return */
136 /* value pretends bit 0 was set */
137 __asm__ __volatile__(
138 "tas %1@; sne %0; negb %0"
139 : "=d" (oldval)
140 : "a" (addr) : "memory");
141 return oldval;
142 }
143# define GC_TEST_AND_SET_DEFINED
144# endif
145# if defined(POWERPC)
146 inline static int GC_test_and_set(volatile unsigned int *addr) {
147 int oldval;
148 int temp = 1; /* locked value */
149
150 __asm__ __volatile__(
151 "1:\tlwarx %0,0,%3\n" /* load and reserve */
152 "\tcmpwi %0, 0\n" /* if load is */
153 "\tbne 2f\n" /* non-zero, return already set */
154 "\tstwcx. %2,0,%1\n" /* else store conditional */
155 "\tbne- 1b\n" /* retry if lost reservation */
156 "2:\t\n" /* oldval is zero if we set */
157 : "=&r"(oldval), "=p"(addr)
158 : "r"(temp), "1"(addr)
159 : "memory");
160 return oldval;
161 }
162# define GC_TEST_AND_SET_DEFINED
163 inline static void GC_clear(volatile unsigned int *addr) {
164 __asm__ __volatile__("eieio" : : : "memory");
165 *(addr) = 0;
166 }
167# define GC_CLEAR_DEFINED
168# endif
169# if defined(ALPHA)
170 inline static int GC_test_and_set(volatile unsigned int * addr)
171 {
172 unsigned long oldvalue;
173 unsigned long temp;
174
175 __asm__ __volatile__(
176 "1: ldl_l %0,%1\n"
177 " and %0,%3,%2\n"
178 " bne %2,2f\n"
179 " xor %0,%3,%0\n"
180 " stl_c %0,%1\n"
181 " beq %0,3f\n"
182 " mb\n"
183 "2:\n"
184 ".section .text2,\"ax\"\n"
185 "3: br 1b\n"
186 ".previous"
187 :"=&r" (temp), "=m" (*addr), "=&r" (oldvalue)
188 :"Ir" (1), "m" (*addr)
189 :"memory");
190
191 return oldvalue;
192 }
193# define GC_TEST_AND_SET_DEFINED
194 inline static void GC_clear(volatile unsigned int *addr) {
195 __asm__ __volatile__("mb" : : : "memory");
196 *(addr) = 0;
197 }
198# define GC_CLEAR_DEFINED
199# endif /* ALPHA */
200# ifdef ARM32
201 inline static int GC_test_and_set(volatile unsigned int *addr) {
202 int oldval;
203 /* SWP on ARM is very similar to XCHG on x86. Doesn't lock the
204 * bus because there are no SMP ARM machines. If/when there are,
205 * this code will likely need to be updated. */
206 /* See linuxthreads/sysdeps/arm/pt-machine.h in glibc-2.1 */
207 __asm__ __volatile__("swp %0, %1, [%2]"
208 : "=r"(oldval)
209 : "r"(1), "r"(addr)
210 : "memory");
211 return oldval;
212 }
213# define GC_TEST_AND_SET_DEFINED
214# endif /* ARM32 */
215# ifdef S390
216 inline static int GC_test_and_set(volatile unsigned int *addr) {
217 int ret;
218 __asm__ __volatile__ (
219 " l %0,0(%2)\n"
220 "0: cs %0,%1,0(%2)\n"
221 " jl 0b"
222 : "=&d" (ret)
223 : "d" (1), "a" (addr)
224 : "cc", "memory");
225 return ret;
226 }
227# endif
228# endif /* __GNUC__ */
229# if (defined(ALPHA) && !defined(__GNUC__))
230# ifndef OSF1
231 --> We currently assume that if gcc is not used, we are
232 --> running under Tru64.
233# endif
234# include <machine/builtins.h>
235# include <c_asm.h>
236# define GC_test_and_set(addr) __ATOMIC_EXCH_LONG(addr, 1)
237# define GC_TEST_AND_SET_DEFINED
238# define GC_clear(addr) { asm("mb"); *(volatile unsigned *)addr = 0; }
239# define GC_CLEAR_DEFINED
240# endif
241# if defined(MSWIN32)
242# define GC_test_and_set(addr) InterlockedExchange((LPLONG)addr,1)
243# define GC_TEST_AND_SET_DEFINED
244# endif
245# ifdef MIPS
246# ifdef LINUX
247# include <sys/tas.h>
248# define GC_test_and_set(addr) _test_and_set((int *) addr,1)
249# define GC_TEST_AND_SET_DEFINED
250# elif __mips < 3 || !(defined (_ABIN32) || defined(_ABI64)) \
251 || !defined(_COMPILER_VERSION) || _COMPILER_VERSION < 700
252# ifdef __GNUC__
253# define GC_test_and_set(addr) _test_and_set(addr,1)
254# else
255# define GC_test_and_set(addr) test_and_set(addr,1)
256# endif
257# else
258# define GC_test_and_set(addr) __test_and_set(addr,1)
259# define GC_clear(addr) __lock_release(addr);
260# define GC_CLEAR_DEFINED
261# endif
262# define GC_TEST_AND_SET_DEFINED
263# endif /* MIPS */
264# if 0 /* defined(HP_PA) */
265 /* The official recommendation seems to be to not use ldcw from */
266 /* user mode. Since multithreaded incremental collection doesn't */
267 /* work anyway on HP_PA, this shouldn't be a major loss. */
268
269 /* "set" means 0 and "clear" means 1 here. */
270# define GC_test_and_set(addr) !GC_test_and_clear(addr);
271# define GC_TEST_AND_SET_DEFINED
272# define GC_clear(addr) GC_noop1((word)(addr)); *(volatile unsigned int *)addr = 1;
273 /* The above needs a memory barrier! */
274# define GC_CLEAR_DEFINED
275# endif
276# if defined(GC_TEST_AND_SET_DEFINED) && !defined(GC_CLEAR_DEFINED)
277# ifdef __GNUC__
278 inline static void GC_clear(volatile unsigned int *addr) {
279 /* Try to discourage gcc from moving anything past this. */
280 __asm__ __volatile__(" " : : : "memory");
281 *(addr) = 0;
282 }
283# else
284 /* The function call in the following should prevent the */
285 /* compiler from moving assignments to below the UNLOCK. */
286# define GC_clear(addr) GC_noop1((word)(addr)); \
287 *((volatile unsigned int *)(addr)) = 0;
288# endif
289# define GC_CLEAR_DEFINED
290# endif /* !GC_CLEAR_DEFINED */
291
292# if !defined(GC_TEST_AND_SET_DEFINED)
293# define USE_PTHREAD_LOCKS
294# endif
295
296# if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
297 && !defined(GC_IRIX_THREADS) && !defined(GC_WIN32_THREADS)
298# define NO_THREAD (pthread_t)(-1)
299# include <pthread.h>
300# if defined(PARALLEL_MARK)
301 /* We need compare-and-swap to update mark bits, where it's */
302 /* performance critical. If USE_MARK_BYTES is defined, it is */
303 /* no longer needed for this purpose. However we use it in */
304 /* either case to implement atomic fetch-and-add, though that's */
305 /* less performance critical, and could perhaps be done with */
306 /* a lock. */
307# if defined(GENERIC_COMPARE_AND_SWAP)
308 /* Probably not useful, except for debugging. */
309 /* We do use GENERIC_COMPARE_AND_SWAP on PA_RISC, but we */
310 /* minimize its use. */
311 extern pthread_mutex_t GC_compare_and_swap_lock;
312
313 /* Note that if GC_word updates are not atomic, a concurrent */
314 /* reader should acquire GC_compare_and_swap_lock. On */
315 /* currently supported platforms, such updates are atomic. */
316 extern GC_bool GC_compare_and_exchange(volatile GC_word *addr,
317 GC_word old, GC_word new_val);
318# endif /* GENERIC_COMPARE_AND_SWAP */
319# if defined(I386)
320# if !defined(GENERIC_COMPARE_AND_SWAP)
321 /* Returns TRUE if the comparison succeeded. */
322 inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
323 GC_word old,
324 GC_word new_val)
325 {
326 char result;
327 __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
328 : "=m"(*(addr)), "=r"(result)
329 : "r" (new_val), "0"(*(addr)), "a"(old) : "memory");
330 return (GC_bool) result;
331 }
332# endif /* !GENERIC_COMPARE_AND_SWAP */
333 inline static void GC_memory_barrier()
334 {
335 /* We believe the processor ensures at least processor */
336 /* consistent ordering. Thus a compiler barrier */
337 /* should suffice. */
338 __asm__ __volatile__("" : : : "memory");
339 }
340# endif /* I386 */
341# if defined(IA64)
342# if !defined(GENERIC_COMPARE_AND_SWAP)
343 inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
344 GC_word old, GC_word new_val)
345 {
346 unsigned long oldval;
347 __asm__ __volatile__("mov ar.ccv=%4 ;; cmpxchg8.rel %0=%1,%2,ar.ccv"
348 : "=r"(oldval), "=m"(*addr)
349 : "r"(new_val), "1"(*addr), "r"(old) : "memory");
350 return (oldval == old);
351 }
352# endif /* !GENERIC_COMPARE_AND_SWAP */
353# if 0
354 /* Shouldn't be needed; we use volatile stores instead. */
355 inline static void GC_memory_barrier()
356 {
357 __asm__ __volatile__("mf" : : : "memory");
358 }
359# endif /* 0 */
360# endif /* IA64 */
361# if defined(ALPHA)
362# if !defined(GENERIC_COMPARE_AND_SWAP)
363# if defined(__GNUC__)
364 inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
365 GC_word old, GC_word new_val)
366 {
367 unsigned long was_equal;
368 unsigned long temp;
369
370 __asm__ __volatile__(
371 "1: ldq_l %0,%1\n"
372 " cmpeq %0,%4,%2\n"
373 " mov %3,%0\n"
374 " beq %2,2f\n"
375 " stq_c %0,%1\n"
376 " beq %0,1b\n"
377 "2:\n"
378 " mb\n"
379 :"=&r" (temp), "=m" (*addr), "=&r" (was_equal)
380 : "r" (new_val), "Ir" (old)
381 :"memory");
382 return was_equal;
383 }
384# else /* !__GNUC__ */
385 inline static GC_bool GC_compare_and_exchange(volatile GC_word *addr,
386 GC_word old, GC_word new_val)
387 {
388 return __CMP_STORE_QUAD(addr, old, new_val, addr);
389 }
390# endif /* !__GNUC__ */
391# endif /* !GENERIC_COMPARE_AND_SWAP */
392# ifdef __GNUC__
393 inline static void GC_memory_barrier()
394 {
395 __asm__ __volatile__("mb" : : : "memory");
396 }
397# else
398# define GC_memory_barrier() asm("mb")
399# endif /* !__GNUC__ */
400# endif /* ALPHA */
401# if defined(S390)
402# if !defined(GENERIC_COMPARE_AND_SWAP)
403 inline static GC_bool GC_compare_and_exchange(volatile C_word *addr,
404 GC_word old, GC_word new_val)
405 {
406 int retval;
407 __asm__ __volatile__ (
408# ifndef __s390x__
409 " cs %1,%2,0(%3)\n"
410# else
411 " csg %1,%2,0(%3)\n"
412# endif
413 " ipm %0\n"
414 " srl %0,28\n"
415 : "=&d" (retval), "+d" (old)
416 : "d" (new_val), "a" (addr)
417 : "cc", "memory");
418 return retval == 0;
419 }
420# endif
421# endif
422# if !defined(GENERIC_COMPARE_AND_SWAP)
423 /* Returns the original value of *addr. */
424 inline static GC_word GC_atomic_add(volatile GC_word *addr,
425 GC_word how_much)
426 {
427 GC_word old;
428 do {
429 old = *addr;
430 } while (!GC_compare_and_exchange(addr, old, old+how_much));
431 return old;
432 }
433# else /* GENERIC_COMPARE_AND_SWAP */
434 /* So long as a GC_word can be atomically updated, it should */
435 /* be OK to read *addr without a lock. */
436 extern GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much);
437# endif /* GENERIC_COMPARE_AND_SWAP */
438
439# endif /* PARALLEL_MARK */
440
441# if !defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_LOCKS)
442 /* In the THREAD_LOCAL_ALLOC case, the allocation lock tends to */
443 /* be held for long periods, if it is held at all. Thus spinning */
444 /* and sleeping for fixed periods are likely to result in */
445 /* significant wasted time. We thus rely mostly on queued locks. */
446# define USE_SPIN_LOCK
447 extern volatile unsigned int GC_allocate_lock;
448 extern void GC_lock(void);
449 /* Allocation lock holder. Only set if acquired by client through */
450 /* GC_call_with_alloc_lock. */
451# ifdef GC_ASSERTIONS
452# define LOCK() \
453 { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); \
454 SET_LOCK_HOLDER(); }
455# define UNLOCK() \
456 { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
457 GC_clear(&GC_allocate_lock); }
458# else
459# define LOCK() \
460 { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
461# define UNLOCK() \
462 GC_clear(&GC_allocate_lock)
463# endif /* !GC_ASSERTIONS */
464# if 0
465 /* Another alternative for OSF1 might be: */
466# include <sys/mman.h>
467 extern msemaphore GC_allocate_semaphore;
468# define LOCK() { if (msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) \
469 != 0) GC_lock(); else GC_allocate_lock = 1; }
470 /* The following is INCORRECT, since the memory model is too weak. */
471 /* Is this true? Presumably msem_unlock has the right semantics? */
472 /* - HB */
473# define UNLOCK() { GC_allocate_lock = 0; \
474 msem_unlock(&GC_allocate_semaphore, 0); }
475# endif /* 0 */
476# else /* THREAD_LOCAL_ALLOC || USE_PTHREAD_LOCKS */
477# ifndef USE_PTHREAD_LOCKS
478# define USE_PTHREAD_LOCKS
479# endif
480# endif /* THREAD_LOCAL_ALLOC */
481# ifdef USE_PTHREAD_LOCKS
482# include <pthread.h>
483 extern pthread_mutex_t GC_allocate_ml;
484# ifdef GC_ASSERTIONS
485# define LOCK() \
486 { GC_lock(); \
487 SET_LOCK_HOLDER(); }
488# define UNLOCK() \
489 { GC_ASSERT(I_HOLD_LOCK()); UNSET_LOCK_HOLDER(); \
490 pthread_mutex_unlock(&GC_allocate_ml); }
491# else /* !GC_ASSERTIONS */
492# define LOCK() \
493 { if (0 != pthread_mutex_trylock(&GC_allocate_ml)) GC_lock(); }
494# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
495# endif /* !GC_ASSERTIONS */
496# endif /* USE_PTHREAD_LOCKS */
497# define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
498# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
499# define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
500 extern VOLATILE GC_bool GC_collecting;
501# define ENTER_GC() GC_collecting = 1;
502# define EXIT_GC() GC_collecting = 0;
503 extern void GC_lock(void);
504 extern pthread_t GC_lock_holder;
505# ifdef GC_ASSERTIONS
506 extern pthread_t GC_mark_lock_holder;
507# endif
508# endif /* GC_PTHREADS with linux_threads.c implementation */
509# if defined(GC_IRIX_THREADS)
510# include <pthread.h>
511 /* This probably should never be included, but I can't test */
512 /* on Irix anymore. */
513# include <mutex.h>
514
515 extern unsigned long GC_allocate_lock;
516 /* This is not a mutex because mutexes that obey the (optional) */
517 /* POSIX scheduling rules are subject to convoys in high contention */
518 /* applications. This is basically a spin lock. */
519 extern pthread_t GC_lock_holder;
520 extern void GC_lock(void);
521 /* Allocation lock holder. Only set if acquired by client through */
522 /* GC_call_with_alloc_lock. */
523# define SET_LOCK_HOLDER() GC_lock_holder = pthread_self()
524# define NO_THREAD (pthread_t)(-1)
525# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
526# define I_HOLD_LOCK() (pthread_equal(GC_lock_holder, pthread_self()))
527# define LOCK() { if (GC_test_and_set(&GC_allocate_lock)) GC_lock(); }
528# define UNLOCK() GC_clear(&GC_allocate_lock);
529 extern VOLATILE GC_bool GC_collecting;
530# define ENTER_GC() \
531 { \
532 GC_collecting = 1; \
533 }
534# define EXIT_GC() GC_collecting = 0;
535# endif /* GC_IRIX_THREADS */
536# if defined(GC_WIN32_THREADS)
537# if defined(GC_PTHREADS)
538# include <pthread.h>
539 extern pthread_mutex_t GC_allocate_ml;
540# define LOCK() pthread_mutex_lock(&GC_allocate_ml)
541# define UNLOCK() pthread_mutex_unlock(&GC_allocate_ml)
542# else
543# include <windows.h>
544 GC_API CRITICAL_SECTION GC_allocate_ml;
545# define LOCK() EnterCriticalSection(&GC_allocate_ml);
546# define UNLOCK() LeaveCriticalSection(&GC_allocate_ml);
547# endif
548# endif
549# ifndef SET_LOCK_HOLDER
550# define SET_LOCK_HOLDER()
551# define UNSET_LOCK_HOLDER()
552# define I_HOLD_LOCK() FALSE
553 /* Used on platforms were locks can be reacquired, */
554 /* so it doesn't matter if we lie. */
555# endif
556# else /* !THREADS */
557# define LOCK()
558# define UNLOCK()
559# endif /* !THREADS */
560# ifndef SET_LOCK_HOLDER
561# define SET_LOCK_HOLDER()
562# define UNSET_LOCK_HOLDER()
563# define I_HOLD_LOCK() FALSE
564 /* Used on platforms were locks can be reacquired, */
565 /* so it doesn't matter if we lie. */
566# endif
567# ifndef ENTER_GC
568# define ENTER_GC()
569# define EXIT_GC()
570# endif
571
572# ifndef DCL_LOCK_STATE
573# define DCL_LOCK_STATE
574# endif
575# ifndef FASTLOCK
576# define FASTLOCK() LOCK()
577# define FASTLOCK_SUCCEEDED() TRUE
578# define FASTUNLOCK() UNLOCK()
579# endif
580
581#endif /* GC_LOCKS_H */
diff --git a/gc/include/private/gc_pmark.h b/gc/include/private/gc_pmark.h
deleted file mode 100644
index c109738203a..00000000000
--- a/gc/include/private/gc_pmark.h
+++ /dev/null
@@ -1,397 +0,0 @@
1/*
2 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
3 * Copyright (c) 2001 by Hewlett-Packard Company. All rights reserved.
4 *
5 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
6 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
7 *
8 * Permission is hereby granted to use or copy this program
9 * for any purpose, provided the above notices are retained on all copies.
10 * Permission to modify the code and to distribute modified code is granted,
11 * provided the above notices are retained, and a notice that the code was
12 * modified is included with the above copyright notice.
13 *
14 */
15
16/* Private declarations of GC marker data structures and macros */
17
18/*
19 * Declarations of mark stack. Needed by marker and client supplied mark
20 * routines. Transitively include gc_priv.h.
21 * (Note that gc_priv.h should not be included before this, since this
22 * includes dbg_mlc.h, which wants to include gc_priv.h AFTER defining
23 * I_HIDE_POINTERS.)
24 */
25#ifndef GC_PMARK_H
26# define GC_PMARK_H
27
28# if defined(KEEP_BACK_PTRS) || defined(PRINT_BLACK_LIST)
29# include "dbg_mlc.h"
30# endif
31# ifndef GC_MARK_H
32# include "../gc_mark.h"
33# endif
34# ifndef GC_PRIVATE_H
35# include "gc_priv.h"
36# endif
37
38/* The real declarations of the following is in gc_priv.h, so that */
39/* we can avoid scanning the following table. */
40/*
41extern mark_proc GC_mark_procs[MAX_MARK_PROCS];
42*/
43
44/*
45 * Mark descriptor stuff that should remain private for now, mostly
46 * because it's hard to export WORDSZ without including gcconfig.h.
47 */
48# define BITMAP_BITS (WORDSZ - GC_DS_TAG_BITS)
49# define PROC(descr) \
50 (GC_mark_procs[((descr) >> GC_DS_TAG_BITS) & (GC_MAX_MARK_PROCS-1)])
51# define ENV(descr) \
52 ((descr) >> (GC_DS_TAG_BITS + GC_LOG_MAX_MARK_PROCS))
53# define MAX_ENV \
54 (((word)1 << (WORDSZ - GC_DS_TAG_BITS - GC_LOG_MAX_MARK_PROCS)) - 1)
55
56
57extern word GC_n_mark_procs;
58
59/* Number of mark stack entries to discard on overflow. */
60#define GC_MARK_STACK_DISCARDS (INITIAL_MARK_STACK_SIZE/8)
61
62typedef struct GC_ms_entry {
63 GC_word * mse_start; /* First word of object */
64 GC_word mse_descr; /* Descriptor; low order two bits are tags, */
65 /* identifying the upper 30 bits as one of the */
66 /* following: */
67} mse;
68
69extern word GC_mark_stack_size;
70
71extern mse * GC_mark_stack_limit;
72
73#ifdef PARALLEL_MARK
74 extern mse * VOLATILE GC_mark_stack_top;
75#else
76 extern mse * GC_mark_stack_top;
77#endif
78
79extern mse * GC_mark_stack;
80
81#ifdef PARALLEL_MARK
82 /*
83 * Allow multiple threads to participate in the marking process.
84 * This works roughly as follows:
85 * The main mark stack never shrinks, but it can grow.
86 *
87 * The initiating threads holds the GC lock, and sets GC_help_wanted.
88 *
89 * Other threads:
90 * 1) update helper_count (while holding mark_lock.)
91 * 2) allocate a local mark stack
92 * repeatedly:
93 * 3) Steal a global mark stack entry by atomically replacing
94 * its descriptor with 0.
95 * 4) Copy it to the local stack.
96 * 5) Mark on the local stack until it is empty, or
97 * it may be profitable to copy it back.
98 * 6) If necessary, copy local stack to global one,
99 * holding mark lock.
100 * 7) Stop when the global mark stack is empty.
101 * 8) decrement helper_count (holding mark_lock).
102 *
103 * This is an experiment to see if we can do something along the lines
104 * of the University of Tokyo SGC in a less intrusive, though probably
105 * also less performant, way.
106 */
107 void GC_do_parallel_mark();
108 /* inititate parallel marking. */
109
110 extern GC_bool GC_help_wanted; /* Protected by mark lock */
111 extern unsigned GC_helper_count; /* Number of running helpers. */
112 /* Protected by mark lock */
113 extern unsigned GC_active_count; /* Number of active helpers. */
114 /* Protected by mark lock */
115 /* May increase and decrease */
116 /* within each mark cycle. But */
117 /* once it returns to 0, it */
118 /* stays zero for the cycle. */
119 /* GC_mark_stack_top is also protected by mark lock. */
120 extern mse * VOLATILE GC_first_nonempty;
121 /* Lowest entry on mark stack */
122 /* that may be nonempty. */
123 /* Updated only by initiating */
124 /* thread. */
125 /*
126 * GC_notify_all_marker() is used when GC_help_wanted is first set,
127 * when the last helper becomes inactive,
128 * when something is added to the global mark stack, and just after
129 * GC_mark_no is incremented.
130 * This could be split into multiple CVs (and probably should be to
131 * scale to really large numbers of processors.)
132 */
133#endif /* PARALLEL_MARK */
134
135/* Return a pointer to within 1st page of object. */
136/* Set *new_hdr_p to corr. hdr. */
137#ifdef __STDC__
138# ifdef PRINT_BLACK_LIST
139 ptr_t GC_find_start(ptr_t current, hdr *hhdr, hdr **new_hdr_p,
140 word source);
141# else
142 ptr_t GC_find_start(ptr_t current, hdr *hhdr, hdr **new_hdr_p);
143# endif
144#else
145 ptr_t GC_find_start();
146#endif
147
148mse * GC_signal_mark_stack_overflow GC_PROTO((mse *msp));
149
150# ifdef GATHERSTATS
151# define ADD_TO_ATOMIC(sz) GC_atomic_in_use += (sz)
152# define ADD_TO_COMPOSITE(sz) GC_composite_in_use += (sz)
153# else
154# define ADD_TO_ATOMIC(sz)
155# define ADD_TO_COMPOSITE(sz)
156# endif
157
158/* Push the object obj with corresponding heap block header hhdr onto */
159/* the mark stack. */
160# define PUSH_OBJ(obj, hhdr, mark_stack_top, mark_stack_limit) \
161{ \
162 register word _descr = (hhdr) -> hb_descr; \
163 \
164 if (_descr == 0) { \
165 ADD_TO_ATOMIC((hhdr) -> hb_sz); \
166 } else { \
167 ADD_TO_COMPOSITE((hhdr) -> hb_sz); \
168 mark_stack_top++; \
169 if (mark_stack_top >= mark_stack_limit) { \
170 mark_stack_top = GC_signal_mark_stack_overflow(mark_stack_top); \
171 } \
172 mark_stack_top -> mse_start = (obj); \
173 mark_stack_top -> mse_descr = _descr; \
174 } \
175}
176
177/* Push the contents of current onto the mark stack if it is a valid */
178/* ptr to a currently unmarked object. Mark it. */
179/* If we assumed a standard-conforming compiler, we could probably */
180/* generate the exit_label transparently. */
181# define PUSH_CONTENTS(current, mark_stack_top, mark_stack_limit, \
182 source, exit_label) \
183{ \
184 hdr * my_hhdr; \
185 ptr_t my_current = current; \
186 \
187 GET_HDR(my_current, my_hhdr); \
188 if (IS_FORWARDING_ADDR_OR_NIL(my_hhdr)) { \
189 hdr * new_hdr = GC_invalid_header; \
190 my_current = GC_find_start(my_current, my_hhdr, &new_hdr); \
191 my_hhdr = new_hdr; \
192 } \
193 PUSH_CONTENTS_HDR(my_current, mark_stack_top, mark_stack_limit, \
194 source, exit_label, my_hhdr); \
195exit_label: ; \
196}
197
198/* As above, but use header cache for header lookup. */
199# define HC_PUSH_CONTENTS(current, mark_stack_top, mark_stack_limit, \
200 source, exit_label) \
201{ \
202 hdr * my_hhdr; \
203 ptr_t my_current = current; \
204 \
205 HC_GET_HDR(my_current, my_hhdr, source); \
206 PUSH_CONTENTS_HDR(my_current, mark_stack_top, mark_stack_limit, \
207 source, exit_label, my_hhdr); \
208exit_label: ; \
209}
210
211/* Set mark bit, exit if it was already set. */
212
213# ifdef USE_MARK_BYTES
214 /* Unlike the mark bit case, there is a race here, and we may set */
215 /* the bit twice in the concurrent case. This can result in the */
216 /* object being pushed twice. But that's only a performance issue. */
217# define SET_MARK_BIT_EXIT_IF_SET(hhdr,displ,exit_label) \
218 { \
219 register VOLATILE char * mark_byte_addr = \
220 hhdr -> hb_marks + ((displ) >> 1); \
221 register char mark_byte = *mark_byte_addr; \
222 \
223 if (mark_byte) goto exit_label; \
224 *mark_byte_addr = 1; \
225 }
226# else
227# define SET_MARK_BIT_EXIT_IF_SET(hhdr,displ,exit_label) \
228 { \
229 register word * mark_word_addr = hhdr -> hb_marks + divWORDSZ(displ); \
230 \
231 OR_WORD_EXIT_IF_SET(mark_word_addr, (word)1 << modWORDSZ(displ), \
232 exit_label); \
233 }
234# endif /* USE_MARK_BYTES */
235
236/* If the mark bit corresponding to current is not set, set it, and */
237/* push the contents of the object on the mark stack. For a small */
238/* object we assume that current is the (possibly interior) pointer */
239/* to the object. For large objects we assume that current points */
240/* to somewhere inside the first page of the object. If */
241/* GC_all_interior_pointers is set, it may have been previously */
242/* adjusted to make that true. */
243# define PUSH_CONTENTS_HDR(current, mark_stack_top, mark_stack_limit, \
244 source, exit_label, hhdr) \
245{ \
246 int displ; /* Displacement in block; first bytes, then words */ \
247 int map_entry; \
248 \
249 displ = HBLKDISPL(current); \
250 map_entry = MAP_ENTRY((hhdr -> hb_map), displ); \
251 displ = BYTES_TO_WORDS(displ); \
252 if (map_entry > CPP_MAX_OFFSET) { \
253 if (map_entry == OFFSET_TOO_BIG) { \
254 map_entry = displ % (hhdr -> hb_sz); \
255 displ -= map_entry; \
256 if (displ + (hhdr -> hb_sz) > BYTES_TO_WORDS(HBLKSIZE)) { \
257 GC_ADD_TO_BLACK_LIST_NORMAL((word)current, source); \
258 goto exit_label; \
259 } \
260 } else { \
261 GC_ADD_TO_BLACK_LIST_NORMAL((word)current, source); goto exit_label; \
262 } \
263 } else { \
264 displ -= map_entry; \
265 } \
266 GC_ASSERT(displ >= 0 && displ < MARK_BITS_PER_HBLK); \
267 SET_MARK_BIT_EXIT_IF_SET(hhdr, displ, exit_label); \
268 GC_STORE_BACK_PTR((ptr_t)source, (ptr_t)HBLKPTR(current) \
269 + WORDS_TO_BYTES(displ)); \
270 PUSH_OBJ(((word *)(HBLKPTR(current)) + displ), hhdr, \
271 mark_stack_top, mark_stack_limit) \
272}
273
274#if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
275# define PUSH_ONE_CHECKED_STACK(p, source) \
276 GC_mark_and_push_stack(p, (ptr_t)(source))
277#else
278# define PUSH_ONE_CHECKED_STACK(p, source) \
279 GC_mark_and_push_stack(p)
280#endif
281
282/*
283 * Push a single value onto mark stack. Mark from the object pointed to by p.
284 * Invoke FIXUP_POINTER(p) before any further processing.
285 * P is considered valid even if it is an interior pointer.
286 * Previously marked objects are not pushed. Hence we make progress even
287 * if the mark stack overflows.
288 */
289
290# if NEED_FIXUP_POINTER
291 /* Try both the raw version and the fixed up one. */
292# define GC_PUSH_ONE_STACK(p, source) \
293 if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
294 && (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
295 PUSH_ONE_CHECKED_STACK(p, source); \
296 } \
297 FIXUP_POINTER(p); \
298 if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
299 && (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
300 PUSH_ONE_CHECKED_STACK(p, source); \
301 }
302# else /* !NEED_FIXUP_POINTER */
303# define GC_PUSH_ONE_STACK(p, source) \
304 if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
305 && (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
306 PUSH_ONE_CHECKED_STACK(p, source); \
307 }
308# endif
309
310
311/*
312 * As above, but interior pointer recognition as for
313 * normal for heap pointers.
314 */
315# define GC_PUSH_ONE_HEAP(p,source) \
316 FIXUP_POINTER(p); \
317 if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
318 && (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
319 GC_mark_stack_top = GC_mark_and_push( \
320 (GC_PTR)(p), GC_mark_stack_top, \
321 GC_mark_stack_limit, (GC_PTR *)(source)); \
322 }
323
324/* Mark starting at mark stack entry top (incl.) down to */
325/* mark stack entry bottom (incl.). Stop after performing */
326/* about one page worth of work. Return the new mark stack */
327/* top entry. */
328mse * GC_mark_from GC_PROTO((mse * top, mse * bottom, mse *limit));
329
330#define MARK_FROM_MARK_STACK() \
331 GC_mark_stack_top = GC_mark_from(GC_mark_stack_top, \
332 GC_mark_stack, \
333 GC_mark_stack + GC_mark_stack_size);
334
335/*
336 * Mark from one finalizable object using the specified
337 * mark proc. May not mark the object pointed to by
338 * real_ptr. That is the job of the caller, if appropriate
339 */
340# define GC_MARK_FO(real_ptr, mark_proc) \
341{ \
342 (*(mark_proc))(real_ptr); \
343 while (!GC_mark_stack_empty()) MARK_FROM_MARK_STACK(); \
344 if (GC_mark_state != MS_NONE) { \
345 GC_set_mark_bit(real_ptr); \
346 while (!GC_mark_some((ptr_t)0)) {} \
347 } \
348}
349
350extern GC_bool GC_mark_stack_too_small;
351 /* We need a larger mark stack. May be */
352 /* set by client supplied mark routines.*/
353
354typedef int mark_state_t; /* Current state of marking, as follows:*/
355 /* Used to remember where we are during */
356 /* concurrent marking. */
357
358 /* We say something is dirty if it was */
359 /* written since the last time we */
360 /* retrieved dirty bits. We say it's */
361 /* grungy if it was marked dirty in the */
362 /* last set of bits we retrieved. */
363
364 /* Invariant I: all roots and marked */
365 /* objects p are either dirty, or point */
366 /* to objects q that are either marked */
367 /* or a pointer to q appears in a range */
368 /* on the mark stack. */
369
370# define MS_NONE 0 /* No marking in progress. I holds. */
371 /* Mark stack is empty. */
372
373# define MS_PUSH_RESCUERS 1 /* Rescuing objects are currently */
374 /* being pushed. I holds, except */
375 /* that grungy roots may point to */
376 /* unmarked objects, as may marked */
377 /* grungy objects above scan_ptr. */
378
379# define MS_PUSH_UNCOLLECTABLE 2
380 /* I holds, except that marked */
381 /* uncollectable objects above scan_ptr */
382 /* may point to unmarked objects. */
383 /* Roots may point to unmarked objects */
384
385# define MS_ROOTS_PUSHED 3 /* I holds, mark stack may be nonempty */
386
387# define MS_PARTIALLY_INVALID 4 /* I may not hold, e.g. because of M.S. */
388 /* overflow. However marked heap */
389 /* objects below scan_ptr point to */
390 /* marked or stacked objects. */
391
392# define MS_INVALID 5 /* I may not hold. */
393
394extern mark_state_t GC_mark_state;
395
396#endif /* GC_PMARK_H */
397
diff --git a/gc/include/private/gc_priv.h b/gc/include/private/gc_priv.h
deleted file mode 100644
index f2df5d4f5f8..00000000000
--- a/gc/include/private/gc_priv.h
+++ /dev/null
@@ -1,1914 +0,0 @@
1/*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 1999-2001 by Hewlett-Packard Company. All rights reserved.
6 *
7 *
8 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
9 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 *
11 * Permission is hereby granted to use or copy this program
12 * for any purpose, provided the above notices are retained on all copies.
13 * Permission to modify the code and to distribute modified code is granted,
14 * provided the above notices are retained, and a notice that the code was
15 * modified is included with the above copyright notice.
16 */
17
18
19# ifndef GC_PRIVATE_H
20# define GC_PRIVATE_H
21
22#if defined(mips) && defined(SYSTYPE_BSD) && defined(sony_news)
23 /* sony RISC NEWS, NEWSOS 4 */
24# define BSD_TIME
25/* typedef long ptrdiff_t; -- necessary on some really old systems */
26#endif
27
28#if defined(mips) && defined(SYSTYPE_BSD43)
29 /* MIPS RISCOS 4 */
30# define BSD_TIME
31#endif
32
33#ifdef DGUX
34# include <sys/types.h>
35# include <sys/time.h>
36# include <sys/resource.h>
37#endif /* DGUX */
38
39#ifdef BSD_TIME
40# include <sys/types.h>
41# include <sys/time.h>
42# include <sys/resource.h>
43#endif /* BSD_TIME */
44
45# ifndef GC_H
46# include "gc.h"
47# endif
48
49# ifndef GC_MARK_H
50# include "../gc_mark.h"
51# endif
52
53typedef GC_word word;
54typedef GC_signed_word signed_word;
55
56typedef int GC_bool;
57# define TRUE 1
58# define FALSE 0
59
60typedef char * ptr_t; /* A generic pointer to which we can add */
61 /* byte displacements. */
62 /* Preferably identical to caddr_t, if it */
63 /* exists. */
64
65# ifndef GCCONFIG_H
66# include "gcconfig.h"
67# endif
68
69# ifndef HEADERS_H
70# include "gc_hdrs.h"
71# endif
72
73#if defined(__STDC__)
74# include <stdlib.h>
75# if !(defined( sony_news ) )
76# include <stddef.h>
77# endif
78# define VOLATILE volatile
79#else
80# ifdef MSWIN32
81# include <stdlib.h>
82# endif
83# define VOLATILE
84#endif
85
86#if 0 /* defined(__GNUC__) doesn't work yet */
87# define EXPECT(expr, outcome) __builtin_expect(expr,outcome)
88 /* Equivalent to (expr), but predict that usually (expr)==outcome. */
89#else
90# define EXPECT(expr, outcome) (expr)
91#endif /* __GNUC__ */
92
93# ifndef GC_LOCKS_H
94# include "gc_locks.h"
95# endif
96
97# ifdef STACK_GROWS_DOWN
98# define COOLER_THAN >
99# define HOTTER_THAN <
100# define MAKE_COOLER(x,y) if ((word)(x)+(y) > (word)(x)) {(x) += (y);} \
101 else {(x) = (word)ONES;}
102# define MAKE_HOTTER(x,y) (x) -= (y)
103# else
104# define COOLER_THAN <
105# define HOTTER_THAN >
106# define MAKE_COOLER(x,y) if ((word)(x)-(y) < (word)(x)) {(x) -= (y);} else {(x) = 0;}
107# define MAKE_HOTTER(x,y) (x) += (y)
108# endif
109
110#if defined(AMIGA) && defined(__SASC)
111# define GC_FAR __far
112#else
113# define GC_FAR
114#endif
115
116
117/*********************************/
118/* */
119/* Definitions for conservative */
120/* collector */
121/* */
122/*********************************/
123
124/*********************************/
125/* */
126/* Easily changeable parameters */
127/* */
128/*********************************/
129
130/* #define STUBBORN_ALLOC */
131 /* Enable stubborm allocation, and thus a limited */
132 /* form of incremental collection w/o dirty bits. */
133
134/* #define ALL_INTERIOR_POINTERS */
135 /* Forces all pointers into the interior of an */
136 /* object to be considered valid. Also causes the */
137 /* sizes of all objects to be inflated by at least */
138 /* one byte. This should suffice to guarantee */
139 /* that in the presence of a compiler that does */
140 /* not perform garbage-collector-unsafe */
141 /* optimizations, all portable, strictly ANSI */
142 /* conforming C programs should be safely usable */
143 /* with malloc replaced by GC_malloc and free */
144 /* calls removed. There are several disadvantages: */
145 /* 1. There are probably no interesting, portable, */
146 /* strictly ANSI conforming C programs. */
147 /* 2. This option makes it hard for the collector */
148 /* to allocate space that is not ``pointed to'' */
149 /* by integers, etc. Under SunOS 4.X with a */
150 /* statically linked libc, we empiricaly */
151 /* observed that it would be difficult to */
152 /* allocate individual objects larger than 100K. */
153 /* Even if only smaller objects are allocated, */
154 /* more swap space is likely to be needed. */
155 /* Fortunately, much of this will never be */
156 /* touched. */
157 /* If you can easily avoid using this option, do. */
158 /* If not, try to keep individual objects small. */
159 /* This is now really controlled at startup, */
160 /* through GC_all_interior_pointers. */
161
162#define PRINTSTATS /* Print garbage collection statistics */
163 /* For less verbose output, undefine in reclaim.c */
164
165#define PRINTTIMES /* Print the amount of time consumed by each garbage */
166 /* collection. */
167
168#define PRINTBLOCKS /* Print object sizes associated with heap blocks, */
169 /* whether the objects are atomic or composite, and */
170 /* whether or not the block was found to be empty */
171 /* during the reclaim phase. Typically generates */
172 /* about one screenful per garbage collection. */
173#undef PRINTBLOCKS
174
175#ifdef SILENT
176# ifdef PRINTSTATS
177# undef PRINTSTATS
178# endif
179# ifdef PRINTTIMES
180# undef PRINTTIMES
181# endif
182# ifdef PRINTNBLOCKS
183# undef PRINTNBLOCKS
184# endif
185#endif
186
187#if defined(PRINTSTATS) && !defined(GATHERSTATS)
188# define GATHERSTATS
189#endif
190
191#if defined(PRINTSTATS) || !defined(SMALL_CONFIG)
192# define CONDPRINT /* Print some things if GC_print_stats is set */
193#endif
194
195#define GC_INVOKE_FINALIZERS() GC_notify_or_invoke_finalizers()
196
197#define MERGE_SIZES /* Round up some object sizes, so that fewer distinct */
198 /* free lists are actually maintained. This applies */
199 /* only to the top level routines in misc.c, not to */
200 /* user generated code that calls GC_allocobj and */
201 /* GC_allocaobj directly. */
202 /* Slows down average programs slightly. May however */
203 /* substantially reduce fragmentation if allocation */
204 /* request sizes are widely scattered. */
205 /* May save significant amounts of space for obj_map */
206 /* entries. */
207
208#if defined(USE_MARK_BYTES) && !defined(ALIGN_DOUBLE)
209# define ALIGN_DOUBLE
210 /* We use one byte for every 2 words, which doesn't allow for */
211 /* odd numbered words to have mark bits. */
212#endif
213
214#if defined(GC_GCJ_SUPPORT) && ALIGNMENT < 8 && !defined(ALIGN_DOUBLE)
215 /* GCJ's Hashtable synchronization code requires 64-bit alignment. */
216# define ALIGN_DOUBLE
217#endif
218
219/* ALIGN_DOUBLE requires MERGE_SIZES at present. */
220# if defined(ALIGN_DOUBLE) && !defined(MERGE_SIZES)
221# define MERGE_SIZES
222# endif
223
224#if !defined(DONT_ADD_BYTE_AT_END)
225# define EXTRA_BYTES GC_all_interior_pointers
226#else
227# define EXTRA_BYTES 0
228#endif
229
230
231# ifndef LARGE_CONFIG
232# define MINHINCR 16 /* Minimum heap increment, in blocks of HBLKSIZE */
233 /* Must be multiple of largest page size. */
234# define MAXHINCR 2048 /* Maximum heap increment, in blocks */
235# else
236# define MINHINCR 64
237# define MAXHINCR 4096
238# endif
239
240# define TIME_LIMIT 50 /* We try to keep pause times from exceeding */
241 /* this by much. In milliseconds. */
242
243# define BL_LIMIT GC_black_list_spacing
244 /* If we need a block of N bytes, and we have */
245 /* a block of N + BL_LIMIT bytes available, */
246 /* and N > BL_LIMIT, */
247 /* but all possible positions in it are */
248 /* blacklisted, we just use it anyway (and */
249 /* print a warning, if warnings are enabled). */
250 /* This risks subsequently leaking the block */
251 /* due to a false reference. But not using */
252 /* the block risks unreasonable immediate */
253 /* heap growth. */
254
255/*********************************/
256/* */
257/* Stack saving for debugging */
258/* */
259/*********************************/
260
261#ifdef SAVE_CALL_CHAIN
262
263/* Fill in the pc and argument information for up to NFRAMES of my */
264/* callers. Ignore my frame and my callers frame. */
265struct callinfo;
266void GC_save_callers GC_PROTO((struct callinfo info[NFRAMES]));
267
268void GC_print_callers GC_PROTO((struct callinfo info[NFRAMES]));
269
270#endif
271
272#ifdef NEED_CALLINFO
273 struct callinfo {
274 word ci_pc; /* Caller, not callee, pc */
275# if NARGS > 0
276 word ci_arg[NARGS]; /* bit-wise complement to avoid retention */
277# endif
278# if defined(ALIGN_DOUBLE) && (NFRAMES * (NARGS + 1)) % 2 == 1
279 /* Likely alignment problem. */
280 word ci_dummy;
281# endif
282 };
283#endif
284
285
/*********************************/
/*                               */
/* OS interface routines	 */
/*                               */
/*********************************/

/* CLOCK_TYPE, GET_TIME(x) and MS_TIME_DIFF(a,b) abstract the platform	*/
/* timing facility; the collector uses them to time GC phases.		*/
/* Three variants: getrusage user time (BSD_TIME), GetTickCount	*/
/* (win32/wince), and ANSI clock() everywhere else.			*/
#ifdef BSD_TIME
# undef CLOCK_TYPE
# undef GET_TIME
# undef MS_TIME_DIFF
# define CLOCK_TYPE struct timeval
# define GET_TIME(x) { struct rusage rusage; \
		       getrusage (RUSAGE_SELF, &rusage); \
		       x = rusage.ru_utime; }
# define MS_TIME_DIFF(a,b) ((double) (a.tv_sec - b.tv_sec) * 1000.0 \
			    + (double) (a.tv_usec - b.tv_usec) / 1000.0)
#else /* !BSD_TIME */
# if defined(MSWIN32) || defined(MSWINCE)
#   include <windows.h>
#   include <winbase.h>
#   define CLOCK_TYPE DWORD
#   define GET_TIME(x) x = GetTickCount()
    /* NOTE(review): GetTickCount wraps about every 49.7 days; the	*/
    /* unsigned DWORD subtraction below yields the right difference	*/
    /* across a single wrap.						*/
#   define MS_TIME_DIFF(a,b) ((long)((a)-(b)))
# else /* !MSWIN32, !MSWINCE, !BSD_TIME */
#   include <time.h>
#   if !defined(__STDC__) && defined(SPARC) && defined(SUNOS4)
      clock_t clock();	/* Not in time.h, where it belongs	*/
#   endif
#   if defined(FREEBSD) && !defined(CLOCKS_PER_SEC)
#     include <machine/limits.h>
#     define CLOCKS_PER_SEC CLK_TCK
#   endif
#   if !defined(CLOCKS_PER_SEC)
#     define CLOCKS_PER_SEC 1000000
/*
 * This is technically a bug in the implementation.  ANSI requires that
 * CLOCKS_PER_SEC be defined.  But at least under SunOS4.1.1, it isn't.
 * Also note that the combination of ANSI C and POSIX is incredibly gross
 * here. The type clock_t is used by both clock() and times().  But on
 * some machines these use different notions of a clock tick,  CLOCKS_PER_SEC
 * seems to apply only to clock.  Hence we use it here.  On many machines,
 * including SunOS, clock actually uses units of microseconds (which are
 * not really clock ticks).
 */
#   endif
#   define CLOCK_TYPE clock_t
#   define GET_TIME(x) x = clock()
#   define MS_TIME_DIFF(a,b) ((unsigned long) \
		(1000.0*(double)((a)-(b))/(double)CLOCKS_PER_SEC))
# endif /* !MSWIN32 */
#endif /* !BSD_TIME */
337
/* We use bzero and bcopy internally.  They may not be available.	*/
/* Note: BCOPY takes (src, dst, len) in bcopy argument order -- the	*/
/* memcpy-based fallback below deliberately swaps x and y.		*/
# if defined(SPARC) && defined(SUNOS4)
#   define BCOPY_EXISTS
# endif
# if defined(M68K) && defined(AMIGA)
#   define BCOPY_EXISTS
# endif
# if defined(M68K) && defined(NEXT)
#   define BCOPY_EXISTS
# endif
# if defined(VAX)
#   define BCOPY_EXISTS
# endif
# if defined(AMIGA)
#   include <string.h>
#   define BCOPY_EXISTS
# endif
# if defined(MACOSX)
    /* NOTE(review): unlike the AMIGA case, no <string.h> is included	*/
    /* here; presumably bcopy/bzero are declared elsewhere on MACOSX --	*/
    /* confirm before relying on it.					*/
#   define BCOPY_EXISTS
# endif

# ifndef BCOPY_EXISTS
#   include <string.h>
#   define BCOPY(x,y,n) memcpy(y, x, (size_t)(n))
#   define BZERO(x,n)  memset(x, 0, (size_t)(n))
# else
#   define BCOPY(x,y,n) bcopy((char *)(x),(char *)(y),(int)(n))
#   define BZERO(x,n) bzero((char *)(x),(int)(n))
# endif
367
/* Delay any interrupts or signals that may abort this thread.  Data	*/
/* structures are in a consistent state outside this pair of calls.	*/
/* ANSI C allows both to be empty (though the standard isn't very	*/
/* clear on that point).  Standard malloc implementations are usually	*/
/* neither interruptable nor thread-safe, and thus correspond to	*/
/* empty definitions.							*/
/* It probably doesn't make any sense to declare these to be nonempty	*/
/* if the code is being optimized, since signal safety relies on some	*/
/* ordering constraints that are typically not obeyed by optimizing	*/
/* compilers.								*/
# ifdef PCR
#   define DISABLE_SIGNALS() \
		 PCR_Th_SetSigMask(PCR_allSigsBlocked,&GC_old_sig_mask)
#   define ENABLE_SIGNALS() \
		PCR_Th_SetSigMask(&GC_old_sig_mask, NIL)
# else
#   if defined(THREADS) || defined(AMIGA)  \
	|| defined(MSWIN32) || defined(MSWINCE) || defined(MACOS) \
	|| defined(DJGPP) || defined(NO_SIGNALS)
			/* Also useful for debugging.		*/
	/* Should probably use thr_sigsetmask for GC_SOLARIS_THREADS. */
	/* On these configurations signal blocking is a no-op; thread	*/
	/* suspension (or the platform) provides the needed exclusion.	*/
#     define DISABLE_SIGNALS()
#     define ENABLE_SIGNALS()
#   else
#     define DISABLE_SIGNALS() GC_disable_signals()
	void GC_disable_signals();
#     define ENABLE_SIGNALS() GC_enable_signals()
	void GC_enable_signals();
#   endif
# endif
398
/*
 * Stop and restart mutator threads.  No-ops in the single-threaded
 * configurations; otherwise dispatch to the PCR kernel or to the
 * thread-package-specific GC_stop_world/GC_start_world.
 */
# ifdef PCR
#     include "th/PCR_ThCtl.h"
#     define STOP_WORLD() \
 	PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_stopNormal, \
 				   PCR_allSigsBlocked, \
 				   PCR_waitForever)
      /* NOTE(review): the trailing ';' below (absent from STOP_WORLD)	*/
      /* makes START_WORLD() expand to two statements -- confirm no	*/
      /* PCR caller uses it in an unbraced if/else before removing.	*/
#     define START_WORLD() \
	PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_null, \
 				   PCR_allSigsBlocked, \
 				   PCR_waitForever);
# else
#   if defined(GC_SOLARIS_THREADS) || defined(GC_WIN32_THREADS) \
	|| defined(GC_PTHREADS)
      void GC_stop_world();
      void GC_start_world();
#     define STOP_WORLD() GC_stop_world()
#     define START_WORLD() GC_start_world()
#   else
#     define STOP_WORLD()
#     define START_WORLD()
#   endif
# endif
424
/* Abandon ship */
/* NOTE(review): both ABORT expansions end in ';', so an unbraced	*/
/* "if (x) ABORT(m); else ..." would not compile; existing callers	*/
/* evidently avoid that pattern -- verify before changing.		*/
# ifdef PCR
#   define ABORT(s) PCR_Base_Panic(s)
# else
#   ifdef SMALL_CONFIG
#	define ABORT(msg) abort();
#   else
	GC_API void GC_abort GC_PROTO((GC_CONST char * msg));
#	define ABORT(msg) GC_abort(msg);
#   endif
# endif

/* Exit abnormally, but without making a mess (e.g. out of memory) */
# ifdef PCR
#   define EXIT() PCR_Base_Exit(1,PCR_waitForever)
# else
#   define EXIT() (void)exit(1)
# endif

/* Print warning message, e.g. almost out of memory.	*/
/* "msg" must be a string literal: it is concatenated with the		*/
/* "GC Warning: " prefix at compile time.				*/
# define WARN(msg,arg) (*GC_current_warn_proc)("GC Warning: " msg, (GC_word)(arg))
extern GC_warn_proc GC_current_warn_proc;

/* Get environment entry */
#if !defined(NO_GETENV)
#   define GETENV(name) getenv(name)
#else
#   define GETENV(name) 0
#endif
454
/*********************************/
/*                               */
/* Word-size-dependent defines   */
/*                               */
/*********************************/

/* WORDS_TO_BYTES/BYTES_TO_WORDS, LOGWL and modWORDSZ are derived from	*/
/* CPP_WORDSZ (defined in gcconfig.h); UNALIGNED is set when pointer	*/
/* alignment may be finer than the word size.				*/
#if CPP_WORDSZ == 32
#  define WORDS_TO_BYTES(x)   ((x)<<2)
#  define BYTES_TO_WORDS(x)   ((x)>>2)
#  define LOGWL               ((word)5)    /* log[2] of CPP_WORDSZ */
#  define modWORDSZ(n) ((n) & 0x1f)        /* n mod size of word	    */
#  if ALIGNMENT != 4
#	define UNALIGNED
#  endif
#endif

#if CPP_WORDSZ == 64
#  define WORDS_TO_BYTES(x)   ((x)<<3)
#  define BYTES_TO_WORDS(x)   ((x)>>3)
#  define LOGWL               ((word)6)    /* log[2] of CPP_WORDSZ */
#  define modWORDSZ(n) ((n) & 0x3f)        /* n mod size of word	    */
#  if ALIGNMENT != 8
#	define UNALIGNED
#  endif
#endif

#define WORDSZ ((word)CPP_WORDSZ)
#define SIGNB  ((word)1 << (WORDSZ-1))	/* High (sign) bit of a word.	*/
#define BYTES_PER_WORD      ((word)(sizeof (word)))
#define ONES                ((word)(signed_word)(-1))	/* All bits set. */
#define divWORDSZ(n) ((n) >> LOGWL)	   /* divide n by size of word      */
486
/*********************/
/*                   */
/*  Size Parameters  */
/*                   */
/*********************/

/*  heap block size, bytes. Should be power of 2 */
/*  May be predefined by the client as HBLKSIZE; otherwise chosen	*/
/*  from SMALL_CONFIG / word size.  CPP_LOG_HBLKSIZE is the compile-	*/
/*  time log2, from which CPP_HBLKSIZE, LOG_HBLKSIZE and HBLKSIZE	*/
/*  are derived below.							*/

#ifndef HBLKSIZE
# ifdef SMALL_CONFIG
#   define CPP_LOG_HBLKSIZE 10
# else
#   if (CPP_WORDSZ == 32) || (defined(HPUX) && defined(HP_PA))
      /* HPUX/PA seems to use 4K pages with the 64 bit ABI */
#     define CPP_LOG_HBLKSIZE 12
#   else
#     define CPP_LOG_HBLKSIZE 13
#   endif
# endif
#else
# if HBLKSIZE == 512
#   define CPP_LOG_HBLKSIZE 9
# endif
# if HBLKSIZE == 1024
#   define CPP_LOG_HBLKSIZE 10
# endif
# if HBLKSIZE == 2048
#   define CPP_LOG_HBLKSIZE 11
# endif
# if HBLKSIZE == 4096
#   define CPP_LOG_HBLKSIZE 12
# endif
# if HBLKSIZE == 8192
#   define CPP_LOG_HBLKSIZE 13
# endif
# if HBLKSIZE == 16384
#   define CPP_LOG_HBLKSIZE 14
# endif
# ifndef CPP_LOG_HBLKSIZE
    /* Deliberate syntax error: forces a compile failure when the	*/
    /* client-supplied HBLKSIZE is not one of the supported values.	*/
    --> fix HBLKSIZE
# endif
# undef HBLKSIZE
#endif
# define CPP_HBLKSIZE (1 << CPP_LOG_HBLKSIZE)
# define LOG_HBLKSIZE   ((word)CPP_LOG_HBLKSIZE)
# define HBLKSIZE ((word)CPP_HBLKSIZE)


/*  max size objects supported by freelist (larger objects may be   */
/*  allocated, but less efficiently)                                */

#define CPP_MAXOBJBYTES (CPP_HBLKSIZE/2)
#define MAXOBJBYTES ((word)CPP_MAXOBJBYTES)
#define CPP_MAXOBJSZ    BYTES_TO_WORDS(CPP_HBLKSIZE/2)
#define MAXOBJSZ ((word)CPP_MAXOBJSZ)

# define divHBLKSZ(n) ((n) >> LOG_HBLKSIZE)

# define HBLK_PTR_DIFF(p,q) divHBLKSZ((ptr_t)p - (ptr_t)q)
	/* Equivalent to subtracting 2 hblk pointers.	*/
	/* We do it this way because a compiler should	*/
	/* find it hard to use an integer division	*/
	/* instead of a shift.  The bundled SunOS 4.1	*/
	/* o.w. sometimes pessimizes the subtraction to	*/
	/* involve a call to .div.			*/

# define modHBLKSZ(n) ((n) & (HBLKSIZE-1))

# define HBLKPTR(objptr) ((struct hblk *)(((word) (objptr)) & ~(HBLKSIZE-1)))

# define HBLKDISPL(objptr) (((word) (objptr)) & (HBLKSIZE-1))

/* Round up byte allocation requests to integral number of words, etc. */
# define ROUNDED_UP_WORDS(n) \
	BYTES_TO_WORDS((n) + (WORDS_TO_BYTES(1) - 1 + EXTRA_BYTES))
# ifdef ALIGN_DOUBLE
#       define ALIGNED_WORDS(n) \
	    (BYTES_TO_WORDS((n) + WORDS_TO_BYTES(2) - 1 + EXTRA_BYTES) & ~1)
# else
#       define ALIGNED_WORDS(n) ROUNDED_UP_WORDS(n)
# endif
# define SMALL_OBJ(bytes) ((bytes) < (MAXOBJBYTES - EXTRA_BYTES))
# define ADD_SLOP(bytes) ((bytes) + EXTRA_BYTES)
# ifndef MIN_WORDS
    /* MIN_WORDS is the size of the smallest allocated object.	*/
    /* 1 and 2 are the only valid values.			*/
    /* 2 must be used if:					*/
    /* - GC_gcj_malloc can be used for objects of requested 	*/
    /*   size  smaller than 2 words, or				*/
    /* - USE_MARK_BYTES is defined.				*/
#   if defined(USE_MARK_BYTES) || defined(GC_GCJ_SUPPORT)
#     define MIN_WORDS 2   	/* Smallest allocated object.	*/
#   else
#     define MIN_WORDS 1
#   endif
# endif
583
584
/*
 * Hash table representation of sets of pages.  This assumes it is
 * OK to add spurious entries to sets.
 * Used by black-listing code, and perhaps by dirty bit maintenance code.
 * A page_hash_table is a fixed-size bit set: PHT_HASH maps a heap
 * address to a bit index; collisions conservatively merge pages.
 */

# ifdef LARGE_CONFIG
#   define LOG_PHT_ENTRIES  20  /* Collisions likely at 1M blocks,	*/
				/* which is >= 4GB.  Each table takes	*/
				/* 128KB, some of which may never be	*/
				/* touched.				*/
# else
#   ifdef SMALL_CONFIG
#	define LOG_PHT_ENTRIES  14 /* Collisions are likely if heap grows	*/
				   /* to more than 16K hblks = 64MB.	*/
				   /* Each hash table occupies 2K bytes.   */
#   else /* default "medium" configuration */
#	define LOG_PHT_ENTRIES  16 /* Collisions are likely if heap grows	*/
				   /* to more than 64K hblks >= 256MB.	*/
				   /* Each hash table occupies 8K bytes.  */
#   endif
# endif
# define PHT_ENTRIES ((word)1 << LOG_PHT_ENTRIES)
# define PHT_SIZE (PHT_ENTRIES >> LOGWL)
typedef word page_hash_table[PHT_SIZE];

# define PHT_HASH(addr) ((((word)(addr)) >> LOG_HBLKSIZE) & (PHT_ENTRIES - 1))

# define get_pht_entry_from_index(bl, index) \
		(((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
# define set_pht_entry_from_index(bl, index) \
		(bl)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index)
# define clear_pht_entry_from_index(bl, index) \
		(bl)[divWORDSZ(index)] &= ~((word)1 << modWORDSZ(index))
/* And a dumb but thread-safe version of set_pht_entry_from_index.	*/
/* This sets (many) extra bits.						*/
/* (It stores an all-ones word, so no read-modify-write is needed.)	*/
# define set_pht_entry_from_index_safe(bl, index) \
		(bl)[divWORDSZ(index)] = ONES
623
624
625
/********************************************/
/*                                          */
/*    H e a p   B l o c k s                 */
/*                                          */
/********************************************/

/*  heap block header */
#define HBLKMASK   (HBLKSIZE-1)

#define BITS_PER_HBLK (CPP_HBLKSIZE * 8)

#define MARK_BITS_PER_HBLK (BITS_PER_HBLK/CPP_WORDSZ)
	   /* upper bound                    */
	   /* We allocate 1 bit/word, unless USE_MARK_BYTES	*/
	   /* is defined.  Only the first word   		*/
	   /* in each object is actually marked.		*/

# ifdef USE_MARK_BYTES
#   define MARK_BITS_SZ (MARK_BITS_PER_HBLK/2)
	/* Unlike the other case, this is in units of bytes.		*/
	/* We actually allocate only every second mark bit, since we	*/
	/* force all objects to be doubleword aligned.			*/
	/* However, each mark bit is allocated as a byte.		*/
# else
#   define MARK_BITS_SZ (MARK_BITS_PER_HBLK/CPP_WORDSZ)
# endif

/* We maintain layout maps for heap blocks containing objects of a given */
/* size.  Each entry in this map describes a byte offset and has the	  */
/* following type.							  */
typedef unsigned char map_entry_type;

/* Per-heap-block header.  Kept outside the block itself; see		*/
/* gc_hdrs.h for the index structure used to find it.			*/
struct hblkhdr {
    word hb_sz;  /* If in use, size in words, of objects in the block. */
		 /* if free, the size in bytes of the whole block      */
    struct hblk * hb_next; 	/* Link field for hblk free list	 */
    				/* and for lists of chunks waiting to be */
    				/* reclaimed.				 */
    struct hblk * hb_prev;	/* Backwards link for free list.	*/
    word hb_descr;   		/* object descriptor for marking.  See	*/
    				/* mark.h.				*/
    map_entry_type * hb_map;	
    			/* A pointer to a pointer validity map of the block. */
    		      	/* See GC_obj_map.				     */
    		     	/* Valid for all blocks with headers.		     */
    		     	/* Free blocks point to GC_invalid_map.		     */
    unsigned char hb_obj_kind;
    			 /* Kind of objects in the block.  Each kind 	*/
    			 /* identifies a mark procedure and a set of 	*/
    			 /* list headers.  Sometimes called regions.	*/
    unsigned char hb_flags;
#	define IGNORE_OFF_PAGE	1	/* Ignore pointers that do not	*/
					/* point to the first page of 	*/
					/* this object.			*/
#	define WAS_UNMAPPED 2	/* This is a free block, which has	*/
				/* been unmapped from the address 	*/
				/* space.				*/
				/* GC_remap must be invoked on it	*/
				/* before it can be reallocated.	*/
				/* Only set with USE_MUNMAP.		*/
    unsigned short hb_last_reclaimed;
    				/* Value of GC_gc_no when block was	*/
    				/* last allocated or swept. May wrap.   */
				/* For a free block, this is maintained */
				/* only for USE_MUNMAP, and indicates	*/
				/* when the header was allocated, or	*/
				/* when the size of the block last	*/
				/* changed.				*/
#   ifdef USE_MARK_BYTES
      union {
        char _hb_marks[MARK_BITS_SZ];
			    /* The i'th byte is 1 if the object 	*/
			    /* starting at word 2i is marked, 0 o.w.	*/
	word dummy;	/* Force word alignment of mark bytes. */
      } _mark_byte_union;
#     define hb_marks _mark_byte_union._hb_marks
#   else
      word hb_marks[MARK_BITS_SZ];
			    /* Bit i in the array refers to the             */
			    /* object starting at the ith word (header      */
			    /* INCLUDED) in the heap block.                 */
			    /* The lsb of word 0 is numbered 0.		    */
			    /* Unused bits are invalid, and are 	    */
			    /* occasionally set, e.g for uncollectable	    */
			    /* objects.					    */
#   endif /* !USE_MARK_BYTES */
};
713
/*  heap block body */

# define BODY_SZ (HBLKSIZE/sizeof(word))

/* A heap block is just an HBLKSIZE-byte array of words; all layout	*/
/* information lives in the separate hblkhdr above.			*/
struct hblk {
    word hb_body[BODY_SZ];
};

# define HBLK_IS_FREE(hdr) ((hdr) -> hb_map == GC_invalid_map)

# define OBJ_SZ_TO_BLOCKS(sz) \
    divHBLKSZ(WORDS_TO_BYTES(sz) + HBLKSIZE-1)
    /* Size of block (in units of HBLKSIZE) needed to hold objects of	*/
    /* given sz (in words).						*/

/* Object free list link */
/* The first word of a free object stores the next free-list entry.	*/
# define obj_link(p) (*(ptr_t *)(p))

# define LOG_MAX_MARK_PROCS 6
# define MAX_MARK_PROCS (1 << LOG_MAX_MARK_PROCS)
734
/* Root sets.  Logically private to mark_rts.c.  But we don't want the	*/
/* tables scanned, so we put them here.					*/
/* MAX_ROOT_SETS is the maximum number of ranges that can be 	*/
/* registered as static roots. 					*/
# ifdef LARGE_CONFIG
#   define MAX_ROOT_SETS 4096
# else
#   ifdef PCR
#     define MAX_ROOT_SETS 1024
#   else
#     if defined(MSWIN32) || defined(MSWINCE)
#	define MAX_ROOT_SETS 1024
	    /* Under NT, we add only written pages, which can result 	*/
	    /* in many small root sets.					*/
#     else
#       define MAX_ROOT_SETS 256
#     endif
#   endif
# endif

# define MAX_EXCLUSIONS (MAX_ROOT_SETS/4)
/* Maximum number of segments that can be excluded from root sets.	*/

/*
 * Data structure for excluded static roots.
 * A half-open-ish address range [e_start, e_end) that the marker skips.
 */
struct exclusion {
    ptr_t e_start;
    ptr_t e_end;
};

/* Data structure for list of root sets.				*/
/* We keep a hash table, so that we can filter out duplicate additions. */
/* Under Win32, we need to do a better job of filtering overlaps, so	*/
/* we resort to sequential search, and pay the price.			*/
struct roots {
	ptr_t r_start;
	ptr_t r_end;
#	if !defined(MSWIN32) && !defined(MSWINCE)
	  struct roots * r_next;	/* Hash-bucket chain; see GC_root_index. */
#	endif
	GC_bool r_tmp;
	  	/* Delete before registering new dynamic libraries */
};

#if !defined(MSWIN32) && !defined(MSWINCE)
    /* Size of hash table index to roots.	*/
#   define LOG_RT_SIZE 6
#   define RT_SIZE (1 << LOG_RT_SIZE) /* Power of 2, may be != MAX_ROOT_SETS */
#endif
785
/* Lists of all heap blocks and free lists	*/
/* as well as other random data structures	*/
/* that should not be scanned by the		*/
/* collector.					*/
/* These are grouped together in a struct	*/
/* so that they can be easily skipped by the	*/
/* GC_mark routine.				*/
/* The ordering is weird to make GC_malloc	*/
/* faster by keeping the important fields	*/
/* sufficiently close together that a 		*/
/* single load of a base register will do.	*/
/* Scalars that could easily appear to		*/
/* be pointers are also put here.		*/
/* The main fields should precede any 		*/
/* conditionally included fields, so that	*/
/* gc_inl.h will work even if a different set	*/
/* of macros is defined when the client is	*/
/* compiled.					*/
/* (Accessed everywhere via the GC_xxx macros defined after the		*/
/* struct; the single instance is GC_arrays below.)			*/

struct _GC_arrays {
  word _heapsize;
  word _max_heapsize;
  word _requested_heapsize;	/* Heap size due to explicit expansion */
  ptr_t _last_heap_addr;
  ptr_t _prev_heap_addr;
  word _large_free_bytes;
	/* Total bytes contained in blocks on large object free */
	/* list.						*/
  word _large_allocd_bytes;
  	/* Total number of bytes in allocated large objects blocks.	*/
  	/* For the purposes of this counter and the next one only, a 	*/
  	/* large object is one that occupies a block of at least	*/
  	/* 2*HBLKSIZE.							*/
  word _max_large_allocd_bytes;
  	/* Maximum number of bytes that were ever allocated in		*/
  	/* large object blocks.  This is used to help decide when it	*/
  	/* is safe to split up a large block.				*/
  word _words_allocd_before_gc;
		/* Number of words allocated before this	*/
		/* collection cycle.				*/
# ifndef SEPARATE_GLOBALS
    word _words_allocd;
  	/* Number of words allocated during this collection cycle */
# endif
  word _words_wasted;
  	/* Number of words wasted due to internal fragmentation	*/
  	/* in large objects, or due to dropping blacklisted     */
	/* blocks, since last gc.  Approximate.                 */
  word _words_finalized;
  	/* Approximate number of words in objects (and headers)	*/
  	/* That became ready for finalization in the last 	*/
  	/* collection.						*/
  word _non_gc_bytes_at_gc;
  	/* Number of explicitly managed bytes of storage 	*/
  	/* at last collection.					*/
  word _mem_freed;
  	/* Number of explicitly deallocated words of memory	*/
  	/* since last collection.				*/
  word _finalizer_mem_freed;
  	/* Words of memory explicitly deallocated while 	*/
  	/* finalizers were running.  Used to approximate mem.	*/
  	/* explicitly deallocated by finalizers.		*/
  ptr_t _scratch_end_ptr;
  ptr_t _scratch_last_end_ptr;
	/* Used by headers.c, and can easily appear to point to	*/
	/* heap.						*/
  GC_mark_proc _mark_procs[MAX_MARK_PROCS];
  	/* Table of user-defined mark procedures.  There is	*/
	/* a small number of these, which can be referenced	*/
	/* by DS_PROC mark descriptors.  See gc_mark.h.		*/

# ifndef SEPARATE_GLOBALS
    ptr_t _objfreelist[MAXOBJSZ+1];
			  /* free list for objects */
    ptr_t _aobjfreelist[MAXOBJSZ+1];
			  /* free list for atomic objs 	*/

# endif

  ptr_t _uobjfreelist[MAXOBJSZ+1];
			  /* uncollectable but traced objs 	*/
			  /* objects on this and auobjfreelist  */
			  /* are always marked, except during   */
			  /* garbage collections.		*/
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t _auobjfreelist[MAXOBJSZ+1];
# endif
			  /* uncollectable but traced objs	*/

# ifdef GATHERSTATS
    word _composite_in_use;
   		/* Number of words in accessible composite	*/
		/* objects.					*/
    word _atomic_in_use;
    		/* Number of words in accessible atomic		*/
		/* objects.					*/
# endif
# ifdef USE_MUNMAP
    word _unmapped_bytes;
# endif
# ifdef MERGE_SIZES
    unsigned _size_map[WORDS_TO_BYTES(MAXOBJSZ+1)];
    	/* Number of words to allocate for a given allocation request in */
    	/* bytes.							  */
# endif 

# ifdef STUBBORN_ALLOC
    ptr_t _sobjfreelist[MAXOBJSZ+1];
# endif
  			  /* free list for immutable objects	*/
  map_entry_type * _obj_map[MAXOBJSZ+1];
                       /* If not NIL, then a pointer to a map of valid  */
    		       /* object addresses. _obj_map[sz][i] is j if the	*/
    		       /* address block_start+i is a valid pointer      */
    		       /* to an object at block_start +			*/
 		       /* WORDS_TO_BYTES(BYTES_TO_WORDS(i) - j)		*/
		       /* I.e. j is a word displacement from the	*/
		       /* object beginning.				*/
		       /* The entry is OBJ_INVALID if the corresponding	*/
		       /* address is not a valid pointer.  It is	*/
		       /* OFFSET_TOO_BIG if the value j would be too 	*/
		       /* large to fit in the entry.  (Note that the	*/
		       /* size of these entries matters, both for 	*/
		       /* space consumption and for cache utilization.	*/
#   define OFFSET_TOO_BIG 0xfe
#   define OBJ_INVALID 0xff
#   define MAP_ENTRY(map, bytes) (map)[bytes]
#   define MAP_ENTRIES HBLKSIZE
#   define MAP_SIZE MAP_ENTRIES
#   define CPP_MAX_OFFSET (OFFSET_TOO_BIG - 1)	
#   define MAX_OFFSET ((word)CPP_MAX_OFFSET)
    /* The following are used only if GC_all_interior_ptrs != 0 */
#	define VALID_OFFSET_SZ \
	  (CPP_MAX_OFFSET > WORDS_TO_BYTES(CPP_MAXOBJSZ)? \
	   CPP_MAX_OFFSET+1 \
	   : WORDS_TO_BYTES(CPP_MAXOBJSZ)+1)
  char _valid_offsets[VALID_OFFSET_SZ];
				/* GC_valid_offsets[i] == TRUE ==> i 	*/
				/* is registered as a displacement.	*/
#	define OFFSET_VALID(displ) \
	  (GC_all_interior_pointers || GC_valid_offsets[displ])
  char _modws_valid_offsets[sizeof(word)];
				/* GC_valid_offsets[i] ==>		  */
				/* GC_modws_valid_offsets[i%sizeof(word)] */
# ifdef STUBBORN_ALLOC
    page_hash_table _changed_pages;
        /* Stubborn object pages that were changes since last call to	*/
	/* GC_read_changed.						*/
    page_hash_table _prev_changed_pages;
        /* Stubborn object pages that were changes before last call to */
	/* GC_read_changed.						*/
# endif
# if defined(PROC_VDB) || defined(MPROTECT_VDB)
    page_hash_table _grungy_pages; /* Pages that were dirty at last 	   */
				     /* GC_read_dirty.			   */
# endif
# ifdef MPROTECT_VDB
    VOLATILE page_hash_table _dirty_pages;	
			/* Pages dirtied since last GC_read_dirty. */
# endif
# ifdef PROC_VDB
    page_hash_table _written_pages;	/* Pages ever dirtied	*/
# endif
# ifdef LARGE_CONFIG
#   if CPP_WORDSZ > 32
#     define MAX_HEAP_SECTS 4096 	/* overflows at roughly 64 GB	   */
#   else
#     define MAX_HEAP_SECTS 768		/* Separately added heap sections. */
#   endif
# else
#   ifdef SMALL_CONFIG
#     define MAX_HEAP_SECTS 128		/* Roughly 1GB			*/
#   else
#     define MAX_HEAP_SECTS 384		/* Roughly 3GB			*/
#   endif
# endif
  struct HeapSect {
      ptr_t hs_start; word hs_bytes;
  } _heap_sects[MAX_HEAP_SECTS];
# if defined(MSWIN32) || defined(MSWINCE)
    ptr_t _heap_bases[MAX_HEAP_SECTS];
    		/* Start address of memory regions obtained from kernel. */
# endif
# ifdef MSWINCE
    word _heap_lengths[MAX_HEAP_SECTS];
    		/* Commited lengths of memory regions obtained from kernel. */
# endif
  struct roots _static_roots[MAX_ROOT_SETS];
# if !defined(MSWIN32) && !defined(MSWINCE)
    struct roots * _root_index[RT_SIZE];
# endif
  struct exclusion _excl_table[MAX_EXCLUSIONS];
  /* Block header index; see gc_headers.h */
  bottom_index * _all_nils;
  bottom_index * _top_index [TOP_SZ];
#ifdef SAVE_CALL_CHAIN
  struct callinfo _last_stack[NFRAMES];	/* Stack at last garbage collection.*/
  					/* Useful for debugging	mysterious  */
  					/* object disappearances.	    */
  					/* In the multithreaded case, we    */
  					/* currently only save the calling  */
  					/* stack.			    */
#endif
};
989
/* The single instance of the struct above.  GC_FAR matters only on	*/
/* segmented architectures.  Access goes through the GC_xxx aliases	*/
/* below so client-visible names stay stable even if fields move.	*/
GC_API GC_FAR struct _GC_arrays GC_arrays; 

# ifndef SEPARATE_GLOBALS
#   define GC_objfreelist GC_arrays._objfreelist
#   define GC_aobjfreelist GC_arrays._aobjfreelist
#   define GC_words_allocd GC_arrays._words_allocd
# endif
# define GC_uobjfreelist GC_arrays._uobjfreelist
# ifdef ATOMIC_UNCOLLECTABLE
#   define GC_auobjfreelist GC_arrays._auobjfreelist
# endif
# define GC_sobjfreelist GC_arrays._sobjfreelist
# define GC_valid_offsets GC_arrays._valid_offsets
# define GC_modws_valid_offsets GC_arrays._modws_valid_offsets
# ifdef STUBBORN_ALLOC
#    define GC_changed_pages GC_arrays._changed_pages
#    define GC_prev_changed_pages GC_arrays._prev_changed_pages
# endif
# define GC_obj_map GC_arrays._obj_map
# define GC_last_heap_addr GC_arrays._last_heap_addr
# define GC_prev_heap_addr GC_arrays._prev_heap_addr
# define GC_words_wasted GC_arrays._words_wasted
# define GC_large_free_bytes GC_arrays._large_free_bytes
# define GC_large_allocd_bytes GC_arrays._large_allocd_bytes
# define GC_max_large_allocd_bytes GC_arrays._max_large_allocd_bytes
# define GC_words_finalized GC_arrays._words_finalized
# define GC_non_gc_bytes_at_gc GC_arrays._non_gc_bytes_at_gc
# define GC_mem_freed GC_arrays._mem_freed
# define GC_finalizer_mem_freed GC_arrays._finalizer_mem_freed
# define GC_scratch_end_ptr GC_arrays._scratch_end_ptr
# define GC_scratch_last_end_ptr GC_arrays._scratch_last_end_ptr
# define GC_mark_procs GC_arrays._mark_procs
# define GC_heapsize GC_arrays._heapsize
# define GC_max_heapsize GC_arrays._max_heapsize
# define GC_requested_heapsize GC_arrays._requested_heapsize
# define GC_words_allocd_before_gc GC_arrays._words_allocd_before_gc
# define GC_heap_sects GC_arrays._heap_sects
# define GC_last_stack GC_arrays._last_stack
# ifdef USE_MUNMAP
#   define GC_unmapped_bytes GC_arrays._unmapped_bytes
# endif
# if defined(MSWIN32) || defined(MSWINCE)
#   define GC_heap_bases GC_arrays._heap_bases
# endif
# ifdef MSWINCE
#   define GC_heap_lengths GC_arrays._heap_lengths
# endif
# define GC_static_roots GC_arrays._static_roots
# define GC_root_index GC_arrays._root_index
# define GC_excl_table GC_arrays._excl_table
# define GC_all_nils GC_arrays._all_nils
# define GC_top_index GC_arrays._top_index
# if defined(PROC_VDB) || defined(MPROTECT_VDB)
#   define GC_grungy_pages GC_arrays._grungy_pages
# endif
# ifdef MPROTECT_VDB
#   define GC_dirty_pages GC_arrays._dirty_pages
# endif
# ifdef PROC_VDB
#   define GC_written_pages GC_arrays._written_pages
# endif
# ifdef GATHERSTATS
#   define GC_composite_in_use GC_arrays._composite_in_use
#   define GC_atomic_in_use GC_arrays._atomic_in_use
# endif
# ifdef MERGE_SIZES
#   define GC_size_map GC_arrays._size_map
# endif

/* Address bounds of GC_arrays, used to exclude it from scanning.	*/
# define beginGC_arrays ((ptr_t)(&GC_arrays))
# define endGC_arrays (((ptr_t)(&GC_arrays)) + (sizeof GC_arrays))

#define USED_HEAP_SIZE (GC_heapsize - GC_large_free_bytes)
1063
/* Object kinds: */
# define MAXOBJKINDS 16

/* Per-kind allocation/marking parameters.  Indexed by hb_obj_kind;	*/
/* the predefined kinds (PTRFREE, NORMAL, ...) are defined further on.	*/
extern struct obj_kind {
   ptr_t *ok_freelist;	/* Array of free listheaders for this kind of object */
   			/* Point either to GC_arrays or to storage allocated */
   			/* with GC_scratch_alloc.			      */
   struct hblk **ok_reclaim_list;
   			/* List headers for lists of blocks waiting to be */
   			/* swept.					  */
   word ok_descriptor;  /* Descriptor template for objects in this	*/
   			/* block.					*/
   GC_bool ok_relocate_descr;
   			/* Add object size in bytes to descriptor 	*/
   			/* template to obtain descriptor.  Otherwise	*/
   			/* template is used as is.			*/
   GC_bool ok_init;   /* Clear objects before putting them on the free list. */
} GC_obj_kinds[MAXOBJKINDS];

# define beginGC_obj_kinds ((ptr_t)(&GC_obj_kinds))
# define endGC_obj_kinds (beginGC_obj_kinds + (sizeof GC_obj_kinds))
1085
/* Variables that used to be in GC_arrays, but need to be accessed by 	*/
/* inline allocation code.  If they were in GC_arrays, the inlined 	*/
/* allocation code would include GC_arrays offsets (as it did), which	*/
/* introduce maintenance problems.					*/

#ifdef SEPARATE_GLOBALS
  word GC_words_allocd;
  	/* Number of words allocated during this collection cycle */
  ptr_t GC_objfreelist[MAXOBJSZ+1];
			  /* free list for NORMAL objects */
# define beginGC_objfreelist ((ptr_t)(&GC_objfreelist))
# define endGC_objfreelist (beginGC_objfreelist + sizeof(GC_objfreelist))

  ptr_t GC_aobjfreelist[MAXOBJSZ+1];
			  /* free list for atomic (PTRFREE) objs */
# define beginGC_aobjfreelist ((ptr_t)(&GC_aobjfreelist))
# define endGC_aobjfreelist (beginGC_aobjfreelist + sizeof(GC_aobjfreelist))
#endif

/* Predefined kinds: */
# define PTRFREE 0
# define NORMAL  1
# define UNCOLLECTABLE 2
# ifdef ATOMIC_UNCOLLECTABLE
#   define AUNCOLLECTABLE 3
#   define STUBBORN 4
    /* Works because AUNCOLLECTABLE == UNCOLLECTABLE | 1: masking the	*/
    /* low bit maps both uncollectable kinds to UNCOLLECTABLE.		*/
#   define IS_UNCOLLECTABLE(k) (((k) & ~1) == UNCOLLECTABLE)
# else
#   define STUBBORN 3
#   define IS_UNCOLLECTABLE(k) ((k) == UNCOLLECTABLE)
# endif
1117
extern int GC_n_kinds;		/* Number of kind slots in use in GC_obj_kinds. */

GC_API word GC_fo_entries;	/* Number of finalizable-object entries. */

extern word GC_n_heap_sects;	/* Number of separately added heap	*/
				/* sections.				*/

extern word GC_page_size;

# if defined(MSWIN32) || defined(MSWINCE)
    struct _SYSTEM_INFO;
    extern struct _SYSTEM_INFO GC_sysinfo;
    extern word GC_n_heap_bases;	/* See GC_heap_bases.	*/
# endif

extern word GC_total_stack_black_listed;
			/* Number of bytes on stack blacklist. 	*/

extern word GC_black_list_spacing;
			/* Average number of bytes between blacklisted	*/
			/* blocks. Approximate.				*/
			/* Counts only blocks that are 			*/
			/* "stack-blacklisted", i.e. that are 		*/
			/* problematic in the interior of an object.	*/

extern map_entry_type * GC_invalid_map;
			/* Pointer to the nowhere valid hblk map */
			/* Blocks pointing to this map are free. */

extern struct hblk * GC_hblkfreelist[];
				/* List of completely empty heap blocks	*/
				/* Linked through hb_next field of 	*/
				/* header structure associated with	*/
				/* block.				*/

extern GC_bool GC_objects_are_marked;	/* There are marked objects in  */
					/* the heap.			*/

#ifndef SMALL_CONFIG
  extern GC_bool GC_incremental;
			/* Using incremental/generational collection. */
# define TRUE_INCREMENTAL \
	(GC_incremental && GC_time_limit != GC_TIME_UNLIMITED)
	/* True incremental, not just generational, mode */
#else
# define GC_incremental FALSE
			/* Hopefully allow optimizer to remove some code. */
# define TRUE_INCREMENTAL FALSE
#endif

extern GC_bool GC_dirty_maintained;
				/* Dirty bits are being maintained, 	*/
				/* either for incremental collection,	*/
				/* or to limit the root set.		*/

extern word GC_root_size;	/* Total size of registered root sections */

extern GC_bool GC_debugging_started;	/* GC_debug_malloc has been called. */ 

extern long GC_large_alloc_warn_interval;
	/* Interval between unsuppressed warnings.	*/

extern long GC_large_alloc_warn_suppressed;
	/* Number of warnings suppressed so far.	*/

/* Operations */
/* NOTE(review): this macro evaluates x twice; do not pass expressions	*/
/* with side effects.  Guarded by #ifndef so a library abs() wins.	*/
# ifndef abs
#   define abs(x)  ((x) < 0? (-(x)) : (x))
# endif
1187
1188
/*  Marks are in a reserved area in                          */
/*  each heap block.  Each word has one mark bit associated  */
/*  with it. Only those corresponding to the beginning of an */
/*  object are used.                                         */

/* Set mark bit correctly, even if mark bits may be concurrently 	*/
/* accessed.								*/
/* With PARALLEL_MARK this is a compare-and-exchange retry loop;	*/
/* otherwise a plain read-modify-write suffices.			*/
#ifdef PARALLEL_MARK
# define OR_WORD(addr, bits) \
	{ word old; \
	  do { \
	    old = *((volatile word *)addr); \
	  } while (!GC_compare_and_exchange((addr), old, old | (bits))); \
	}
# define OR_WORD_EXIT_IF_SET(addr, bits, exit_label) \
	{ word old; \
	  word my_bits = (bits); \
	  do { \
	    old = *((volatile word *)addr); \
	    if (old & my_bits) goto exit_label; \
	  } while (!GC_compare_and_exchange((addr), old, old | my_bits)); \
	}
#else
# define OR_WORD(addr, bits) *(addr) |= (bits)
# define OR_WORD_EXIT_IF_SET(addr, bits, exit_label) \
	{ \
	  word old = *(addr); \
	  word my_bits = (bits); \
	  if (old & my_bits) goto exit_label; \
	  *(addr) = (old | my_bits); \
	}
#endif

/* Mark bit operations */

/*
 * Retrieve, set, clear the mark bit corresponding
 * to the nth word in a given heap block.
 *
 * (Recall that bit n corresponds to object beginning at word n
 * relative to the beginning of the block, including unused words)
 */

#ifdef USE_MARK_BYTES
# define mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[(n) >> 1])
# define set_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[(n)>>1]) = 1
# define clear_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[(n)>>1]) = 0
#else /* !USE_MARK_BYTES */
# define mark_bit_from_hdr(hhdr,n) (((hhdr)->hb_marks[divWORDSZ(n)] \
			    >> (modWORDSZ(n))) & (word)1)
# define set_mark_bit_from_hdr(hhdr,n) \
			    OR_WORD((hhdr)->hb_marks+divWORDSZ(n), \
				    (word)1 << modWORDSZ(n))
# define clear_mark_bit_from_hdr(hhdr,n) (hhdr)->hb_marks[divWORDSZ(n)] \
				&= ~((word)1 << modWORDSZ(n))
#endif /* !USE_MARK_BYTES */
1245
1246/* Important internal collector routines */
1247
1248ptr_t GC_approx_sp GC_PROTO((void));
1249
1250GC_bool GC_should_collect GC_PROTO((void));
1251
1252void GC_apply_to_all_blocks GC_PROTO(( \
1253 void (*fn) GC_PROTO((struct hblk *h, word client_data)), \
1254 word client_data));
1255 /* Invoke fn(hbp, client_data) for each */
1256 /* allocated heap block. */
1257struct hblk * GC_next_used_block GC_PROTO((struct hblk * h));
1258 /* Return first in-use block >= h */
1259struct hblk * GC_prev_block GC_PROTO((struct hblk * h));
1260 /* Return last block <= h. Returned block */
1261 /* is managed by GC, but may or may not be in */
1262 /* use. */
1263void GC_mark_init GC_PROTO((void));
1264void GC_clear_marks GC_PROTO((void)); /* Clear mark bits for all heap objects. */
1265void GC_invalidate_mark_state GC_PROTO((void));
1266 /* Tell the marker that marked */
1267 /* objects may point to unmarked */
1268 /* ones, and roots may point to */
1269 /* unmarked objects. */
1270 /* Reset mark stack. */
1271GC_bool GC_mark_stack_empty GC_PROTO((void));
1272GC_bool GC_mark_some GC_PROTO((ptr_t cold_gc_frame));
1273 /* Perform about one pages worth of marking */
1274 /* work of whatever kind is needed. Returns */
1275 /* quickly if no collection is in progress. */
1276 /* Return TRUE if mark phase finished. */
1277void GC_initiate_gc GC_PROTO((void));
1278 /* initiate collection. */
1279 /* If the mark state is invalid, this */
1280 /* becomes full colleection. Otherwise */
1281 /* it's partial. */
1282void GC_push_all GC_PROTO((ptr_t bottom, ptr_t top));
1283 /* Push everything in a range */
1284 /* onto mark stack. */
1285void GC_push_selected GC_PROTO(( \
1286 ptr_t bottom, \
1287 ptr_t top, \
1288 int (*dirty_fn) GC_PROTO((struct hblk *h)), \
1289 void (*push_fn) GC_PROTO((ptr_t bottom, ptr_t top)) ));
1290 /* Push all pages h in [b,t) s.t. */
1291 /* select_fn(h) != 0 onto mark stack. */
1292#ifndef SMALL_CONFIG
1293 void GC_push_conditional GC_PROTO((ptr_t b, ptr_t t, GC_bool all));
1294#else
1295# define GC_push_conditional(b, t, all) GC_push_all(b, t)
1296#endif
1297 /* Do either of the above, depending */
1298 /* on the third arg. */
1299void GC_push_all_stack GC_PROTO((ptr_t b, ptr_t t));
1300 /* As above, but consider */
1301 /* interior pointers as valid */
1302void GC_push_all_eager GC_PROTO((ptr_t b, ptr_t t));
1303 /* Same as GC_push_all_stack, but */
1304 /* ensures that stack is scanned */
1305 /* immediately, not just scheduled */
1306 /* for scanning. */
1307#ifndef THREADS
1308 void GC_push_all_stack_partially_eager GC_PROTO(( \
1309 ptr_t bottom, ptr_t top, ptr_t cold_gc_frame ));
1310 /* Similar to GC_push_all_eager, but only the */
1311 /* part hotter than cold_gc_frame is scanned */
1312 /* immediately. Needed to ensure that callee- */
1313 /* save registers are not missed. */
1314#else
1315 /* In the threads case, we push part of the current thread stack */
1316 /* with GC_push_all_eager when we push the registers. This gets the */
1317 /* callee-save registers that may disappear. The remainder of the */
1318 /* stacks are scheduled for scanning in *GC_push_other_roots, which */
1319 /* is thread-package-specific. */
1320#endif
1321void GC_push_current_stack GC_PROTO((ptr_t cold_gc_frame));
1322 /* Push enough of the current stack eagerly to */
1323 /* ensure that callee-save registers saved in */
1324 /* GC frames are scanned. */
1325 /* In the non-threads case, schedule entire */
1326 /* stack for scanning. */
1327void GC_push_roots GC_PROTO((GC_bool all, ptr_t cold_gc_frame));
1328 /* Push all or dirty roots. */
1329extern void (*GC_push_other_roots) GC_PROTO((void));
1330 /* Push system or application specific roots */
1331 /* onto the mark stack. In some environments */
1332 /* (e.g. threads environments) this is */
1333 /* predfined to be non-zero. A client supplied */
1334 /* replacement should also call the original */
1335 /* function. */
1336extern void GC_push_gc_structures GC_PROTO((void));
1337 /* Push GC internal roots. These are normally */
1338 /* included in the static data segment, and */
1339 /* Thus implicitly pushed. But we must do this */
1340 /* explicitly if normal root processing is */
1341 /* disabled. Calls the following: */
1342 extern void GC_push_finalizer_structures GC_PROTO((void));
1343 extern void GC_push_stubborn_structures GC_PROTO((void));
1344# ifdef THREADS
1345 extern void GC_push_thread_structures GC_PROTO((void));
1346# endif
1347extern void (*GC_start_call_back) GC_PROTO((void));
1348 /* Called at start of full collections. */
1349 /* Not called if 0. Called with allocation */
1350 /* lock held. */
1351 /* 0 by default. */
1352# if defined(USE_GENERIC_PUSH_REGS)
1353 void GC_generic_push_regs GC_PROTO((ptr_t cold_gc_frame));
1354# else
1355 void GC_push_regs GC_PROTO((void));
1356# endif
1357 /* Push register contents onto mark stack. */
1358 /* If NURSERY is defined, the default push */
1359 /* action can be overridden with GC_push_proc */
1360
1361# ifdef NURSERY
1362 extern void (*GC_push_proc)(ptr_t);
1363# endif
1364# if defined(MSWIN32) || defined(MSWINCE)
1365 void __cdecl GC_push_one GC_PROTO((word p));
1366# else
1367 void GC_push_one GC_PROTO((word p));
1368 /* If p points to an object, mark it */
1369 /* and push contents on the mark stack */
1370 /* Pointer recognition test always */
1371 /* accepts interior pointers, i.e. this */
1372 /* is appropriate for pointers found on */
1373 /* stack. */
1374# endif
1375# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
1376 void GC_mark_and_push_stack GC_PROTO((word p, ptr_t source));
1377 /* Ditto, omits plausibility test */
1378# else
1379 void GC_mark_and_push_stack GC_PROTO((word p));
1380# endif
1381void GC_push_marked GC_PROTO((struct hblk * h, hdr * hhdr));
1382 /* Push contents of all marked objects in h onto */
1383 /* mark stack. */
1384#ifdef SMALL_CONFIG
1385# define GC_push_next_marked_dirty(h) GC_push_next_marked(h)
1386#else
1387 struct hblk * GC_push_next_marked_dirty GC_PROTO((struct hblk * h));
1388 /* Invoke GC_push_marked on next dirty block above h. */
1389 /* Return a pointer just past the end of this block. */
1390#endif /* !SMALL_CONFIG */
1391struct hblk * GC_push_next_marked GC_PROTO((struct hblk * h));
1392 /* Ditto, but also mark from clean pages. */
1393struct hblk * GC_push_next_marked_uncollectable GC_PROTO((struct hblk * h));
1394 /* Ditto, but mark only from uncollectable pages. */
1395GC_bool GC_stopped_mark GC_PROTO((GC_stop_func stop_func));
1396 /* Stop world and mark from all roots */
1397 /* and rescuers. */
1398void GC_clear_hdr_marks GC_PROTO((hdr * hhdr));
1399 /* Clear the mark bits in a header */
1400void GC_set_hdr_marks GC_PROTO((hdr * hhdr));
1401 /* Set the mark bits in a header */
1402void GC_set_fl_marks GC_PROTO((ptr_t p));
1403 /* Set all mark bits associated with */
1404 /* a free list. */
1405void GC_add_roots_inner GC_PROTO((char * b, char * e, GC_bool tmp));
1406GC_bool GC_is_static_root GC_PROTO((ptr_t p));
1407 /* Is the address p in one of the registered static */
1408 /* root sections? */
1409# if defined(MSWIN32) || defined(_WIN32_WCE_EMULATION)
1410GC_bool GC_is_tmp_root GC_PROTO((ptr_t p));
1411 /* Is the address p in one of the temporary static */
1412 /* root sections? */
1413# endif
1414void GC_register_dynamic_libraries GC_PROTO((void));
1415 /* Add dynamic library data sections to the root set. */
1416
1417GC_bool GC_register_main_static_data GC_PROTO((void));
1418 /* We need to register the main data segment. Returns */
1419 /* TRUE unless this is done implicitly as part of */
1420 /* dynamic library registration. */
1421
1422/* Machine dependent startup routines */
1423ptr_t GC_get_stack_base GC_PROTO((void)); /* Cold end of stack */
1424#ifdef IA64
1425 ptr_t GC_get_register_stack_base GC_PROTO((void));
1426 /* Cold end of register stack. */
1427#endif
1428void GC_register_data_segments GC_PROTO((void));
1429
1430/* Black listing: */
1431void GC_bl_init GC_PROTO((void));
1432# ifdef PRINT_BLACK_LIST
1433 void GC_add_to_black_list_normal GC_PROTO((word p, ptr_t source));
1434 /* Register bits as a possible future false */
1435 /* reference from the heap or static data */
1436# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
1437 if (GC_all_interior_pointers) { \
1438 GC_add_to_black_list_stack(bits, (ptr_t)(source)); \
1439 } else { \
1440 GC_add_to_black_list_normal(bits, (ptr_t)(source)); \
1441 }
1442# else
1443 void GC_add_to_black_list_normal GC_PROTO((word p));
1444# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
1445 if (GC_all_interior_pointers) { \
1446 GC_add_to_black_list_stack(bits); \
1447 } else { \
1448 GC_add_to_black_list_normal(bits); \
1449 }
1450# endif
1451
1452# ifdef PRINT_BLACK_LIST
1453 void GC_add_to_black_list_stack GC_PROTO((word p, ptr_t source));
1454# else
1455 void GC_add_to_black_list_stack GC_PROTO((word p));
1456# endif
1457struct hblk * GC_is_black_listed GC_PROTO((struct hblk * h, word len));
1458 /* If there are likely to be false references */
1459 /* to a block starting at h of the indicated */
1460 /* length, then return the next plausible */
1461 /* starting location for h that might avoid */
1462 /* these false references. */
1463void GC_promote_black_lists GC_PROTO((void));
1464 /* Declare an end to a black listing phase. */
1465void GC_unpromote_black_lists GC_PROTO((void));
1466 /* Approximately undo the effect of the above. */
1467 /* This actually loses some information, but */
1468 /* only in a reasonably safe way. */
1469word GC_number_stack_black_listed GC_PROTO(( \
1470 struct hblk *start, struct hblk *endp1));
1471 /* Return the number of (stack) blacklisted */
1472 /* blocks in the range for statistical */
1473 /* purposes. */
1474
1475ptr_t GC_scratch_alloc GC_PROTO((word bytes));
1476 /* GC internal memory allocation for */
1477 /* small objects. Deallocation is not */
1478 /* possible. */
1479
1480/* Heap block layout maps: */
1481void GC_invalidate_map GC_PROTO((hdr * hhdr));
1482 /* Remove the object map associated */
1483 /* with the block. This identifies */
1484 /* the block as invalid to the mark */
1485 /* routines. */
1486GC_bool GC_add_map_entry GC_PROTO((word sz));
1487 /* Add a heap block map for objects of */
1488 /* size sz to obj_map. */
1489 /* Return FALSE on failure. */
1490void GC_register_displacement_inner GC_PROTO((word offset));
1491 /* Version of GC_register_displacement */
1492 /* that assumes lock is already held */
1493 /* and signals are already disabled. */
1494
1495/* hblk allocation: */
1496void GC_new_hblk GC_PROTO((word size_in_words, int kind));
1497 /* Allocate a new heap block, and build */
1498 /* a free list in it. */
1499
1500ptr_t GC_build_fl GC_PROTO((struct hblk *h, word sz,
1501 GC_bool clear, ptr_t list));
1502 /* Build a free list for objects of */
1503 /* size sz in block h. Append list to */
1504 /* end of the free lists. Possibly */
1505 /* clear objects on the list. Normally */
1506 /* called by GC_new_hblk, but also */
1507 /* called explicitly without GC lock. */
1508
1509struct hblk * GC_allochblk GC_PROTO(( \
1510 word size_in_words, int kind, unsigned flags));
1511 /* Allocate a heap block, inform */
1512 /* the marker that block is valid */
1513 /* for objects of indicated size. */
1514
1515ptr_t GC_alloc_large GC_PROTO((word lw, int k, unsigned flags));
1516 /* Allocate a large block of size lw words. */
1517 /* The block is not cleared. */
1518 /* Flags is 0 or IGNORE_OFF_PAGE. */
1519 /* Calls GC_allchblk to do the actual */
1520 /* allocation, but also triggers GC and/or */
1521 /* heap expansion as appropriate. */
1522 /* Does not update GC_words_allocd, but does */
1523 /* other accounting. */
1524
1525ptr_t GC_alloc_large_and_clear GC_PROTO((word lw, int k, unsigned flags));
1526 /* As above, but clear block if appropriate */
1527 /* for kind k. */
1528
1529void GC_freehblk GC_PROTO((struct hblk * p));
1530 /* Deallocate a heap block and mark it */
1531 /* as invalid. */
1532
1533/* Misc GC: */
1534void GC_init_inner GC_PROTO((void));
1535GC_bool GC_expand_hp_inner GC_PROTO((word n));
1536void GC_start_reclaim GC_PROTO((int abort_if_found));
1537 /* Restore unmarked objects to free */
1538 /* lists, or (if abort_if_found is */
1539 /* TRUE) report them. */
1540 /* Sweeping of small object pages is */
1541 /* largely deferred. */
1542void GC_continue_reclaim GC_PROTO((word sz, int kind));
1543 /* Sweep pages of the given size and */
1544 /* kind, as long as possible, and */
1545 /* as long as the corr. free list is */
1546 /* empty. */
1547void GC_reclaim_or_delete_all GC_PROTO((void));
1548 /* Arrange for all reclaim lists to be */
1549 /* empty. Judiciously choose between */
1550 /* sweeping and discarding each page. */
1551GC_bool GC_reclaim_all GC_PROTO((GC_stop_func stop_func, GC_bool ignore_old));
1552 /* Reclaim all blocks. Abort (in a */
1553 /* consistent state) if f returns TRUE. */
1554GC_bool GC_block_empty GC_PROTO((hdr * hhdr));
1555 /* Block completely unmarked? */
1556GC_bool GC_never_stop_func GC_PROTO((void));
1557 /* Returns FALSE. */
1558GC_bool GC_try_to_collect_inner GC_PROTO((GC_stop_func f));
1559
1560 /* Collect; caller must have acquired */
1561 /* lock and disabled signals. */
1562 /* Collection is aborted if f returns */
1563 /* TRUE. Returns TRUE if it completes */
1564 /* successfully. */
1565# define GC_gcollect_inner() \
1566 (void) GC_try_to_collect_inner(GC_never_stop_func)
1567void GC_finish_collection GC_PROTO((void));
1568 /* Finish collection. Mark bits are */
1569 /* consistent and lock is still held. */
1570GC_bool GC_collect_or_expand GC_PROTO(( \
1571 word needed_blocks, GC_bool ignore_off_page));
1572 /* Collect or expand heap in an attempt */
1573 /* make the indicated number of free */
1574 /* blocks available. Should be called */
1575 /* until the blocks are available or */
1576 /* until it fails by returning FALSE. */
1577
1578extern GC_bool GC_is_initialized; /* GC_init() has been run. */
1579
1580#if defined(MSWIN32) || defined(MSWINCE)
1581 void GC_deinit GC_PROTO((void));
1582 /* Free any resources allocated by */
1583 /* GC_init */
1584#endif
1585
1586void GC_collect_a_little_inner GC_PROTO((int n));
1587 /* Do n units worth of garbage */
1588 /* collection work, if appropriate. */
1589 /* A unit is an amount appropriate for */
1590 /* HBLKSIZE bytes of allocation. */
1591ptr_t GC_generic_malloc GC_PROTO((word lb, int k));
1592 /* Allocate an object of the given */
1593 /* kind. By default, there are only */
1594 /* a few kinds: composite(pointerfree), */
1595 /* atomic, uncollectable, etc. */
1596 /* We claim it's possible for clever */
1597 /* client code that understands GC */
1598 /* internals to add more, e.g. to */
1599 /* communicate object layout info */
1600 /* to the collector. */
1601ptr_t GC_generic_malloc_ignore_off_page GC_PROTO((size_t b, int k));
1602 /* As above, but pointers past the */
1603 /* first page of the resulting object */
1604 /* are ignored. */
1605ptr_t GC_generic_malloc_inner GC_PROTO((word lb, int k));
1606 /* Ditto, but I already hold lock, etc. */
1607ptr_t GC_generic_malloc_words_small GC_PROTO((size_t lw, int k));
1608 /* As above, but size in units of words */
1609 /* Bypasses MERGE_SIZES. Assumes */
1610 /* words <= MAXOBJSZ. */
1611ptr_t GC_generic_malloc_inner_ignore_off_page GC_PROTO((size_t lb, int k));
1612 /* Allocate an object, where */
1613 /* the client guarantees that there */
1614 /* will always be a pointer to the */
1615 /* beginning of the object while the */
1616 /* object is live. */
1617ptr_t GC_allocobj GC_PROTO((word sz, int kind));
1618 /* Make the indicated */
1619 /* free list nonempty, and return its */
1620 /* head. */
1621
1622void GC_init_headers GC_PROTO((void));
1623struct hblkhdr * GC_install_header GC_PROTO((struct hblk *h));
1624 /* Install a header for block h. */
1625 /* Return 0 on failure, or the header */
1626 /* otherwise. */
1627GC_bool GC_install_counts GC_PROTO((struct hblk * h, word sz));
1628 /* Set up forwarding counts for block */
1629 /* h of size sz. */
1630 /* Return FALSE on failure. */
1631void GC_remove_header GC_PROTO((struct hblk * h));
1632 /* Remove the header for block h. */
1633void GC_remove_counts GC_PROTO((struct hblk * h, word sz));
1634 /* Remove forwarding counts for h. */
1635hdr * GC_find_header GC_PROTO((ptr_t h)); /* Debugging only. */
1636
1637void GC_finalize GC_PROTO((void));
1638 /* Perform all indicated finalization actions */
1639 /* on unmarked objects. */
1640 /* Unreachable finalizable objects are enqueued */
1641 /* for processing by GC_invoke_finalizers. */
1642 /* Invoked with lock. */
1643
1644void GC_notify_or_invoke_finalizers GC_PROTO((void));
1645 /* If GC_finalize_on_demand is not set, invoke */
1646 /* eligible finalizers. Otherwise: */
1647 /* Call *GC_finalizer_notifier if there are */
1648 /* finalizers to be run, and we haven't called */
1649 /* this procedure yet this GC cycle. */
1650
1651GC_API GC_PTR GC_make_closure GC_PROTO((GC_finalization_proc fn, GC_PTR data));
1652GC_API void GC_debug_invoke_finalizer GC_PROTO((GC_PTR obj, GC_PTR data));
1653 /* Auxiliary fns to make finalization work */
1654 /* correctly with displaced pointers introduced */
1655 /* by the debugging allocators. */
1656
1657void GC_add_to_heap GC_PROTO((struct hblk *p, word bytes));
1658 /* Add a HBLKSIZE aligned chunk to the heap. */
1659
1660void GC_print_obj GC_PROTO((ptr_t p));
1661 /* P points to somewhere inside an object with */
1662 /* debugging info. Print a human readable */
1663 /* description of the object to stderr. */
1664extern void (*GC_check_heap) GC_PROTO((void));
1665 /* Check that all objects in the heap with */
1666 /* debugging info are intact. */
1667 /* Add any that are not to GC_smashed list. */
1668extern void (*GC_print_all_smashed) GC_PROTO((void));
1669 /* Print GC_smashed if it's not empty. */
1670 /* Clear GC_smashed list. */
1671extern void GC_print_all_errors GC_PROTO((void));
1672 /* Print smashed and leaked objects, if any. */
1673 /* Clear the lists of such objects. */
1674extern void (*GC_print_heap_obj) GC_PROTO((ptr_t p));
1675 /* If possible print s followed by a more */
1676 /* detailed description of the object */
1677 /* referred to by p. */
1678#if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
1679 void GC_print_address_map GC_PROTO((void));
1680 /* Print an address map of the process. */
1681#endif
1682
1683extern GC_bool GC_have_errors; /* We saw a smashed or leaked object. */
1684 /* Call error printing routine */
1685 /* occasionally. */
1686extern GC_bool GC_print_stats; /* Produce at least some logging output */
1687 /* Set from environment variable. */
1688
1689#ifndef NO_DEBUGGING
1690 extern GC_bool GC_dump_regularly; /* Generate regular debugging dumps. */
1691# define COND_DUMP if (GC_dump_regularly) GC_dump();
1692#else
1693# define COND_DUMP
1694#endif
1695
1696/* Macros used for collector internal allocation. */
1697/* These assume the collector lock is held. */
1698#ifdef DBG_HDRS_ALL
1699 extern GC_PTR GC_debug_generic_malloc_inner(size_t lb, int k);
1700 extern GC_PTR GC_debug_generic_malloc_inner_ignore_off_page(size_t lb,
1701 int k);
1702# define GC_INTERNAL_MALLOC GC_debug_generic_malloc_inner
1703# define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE \
1704 GC_debug_generic_malloc_inner_ignore_off_page
1705# ifdef THREADS
1706# define GC_INTERNAL_FREE GC_debug_free_inner
1707# else
1708# define GC_INTERNAL_FREE GC_debug_free
1709# endif
1710#else
1711# define GC_INTERNAL_MALLOC GC_generic_malloc_inner
1712# define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE \
1713 GC_generic_malloc_inner_ignore_off_page
1714# ifdef THREADS
1715# define GC_INTERNAL_FREE GC_free_inner
1716# else
1717# define GC_INTERNAL_FREE GC_free
1718# endif
1719#endif
1720
1721/* Memory unmapping: */
1722#ifdef USE_MUNMAP
1723 void GC_unmap_old(void);
1724 void GC_merge_unmapped(void);
1725 void GC_unmap(ptr_t start, word bytes);
1726 void GC_remap(ptr_t start, word bytes);
1727 void GC_unmap_gap(ptr_t start1, word bytes1, ptr_t start2, word bytes2);
1728#endif
1729
1730/* Virtual dirty bit implementation: */
1731/* Each implementation exports the following: */
1732void GC_read_dirty GC_PROTO((void));
1733 /* Retrieve dirty bits. */
1734GC_bool GC_page_was_dirty GC_PROTO((struct hblk *h));
1735 /* Read retrieved dirty bits. */
1736GC_bool GC_page_was_ever_dirty GC_PROTO((struct hblk *h));
1737 /* Could the page contain valid heap pointers? */
1738void GC_is_fresh GC_PROTO((struct hblk *h, word n));
1739 /* Assert the region currently contains no */
1740 /* valid pointers. */
1741void GC_remove_protection GC_PROTO((struct hblk *h, word nblocks,
1742 GC_bool pointerfree));
1743 /* h is about to be writteni or allocated. Ensure */
1744 /* that it's not write protected by the virtual */
1745 /* dirty bit implementation. */
1746
1747void GC_dirty_init GC_PROTO((void));
1748
1749/* Slow/general mark bit manipulation: */
1750GC_API GC_bool GC_is_marked GC_PROTO((ptr_t p));
1751void GC_clear_mark_bit GC_PROTO((ptr_t p));
1752void GC_set_mark_bit GC_PROTO((ptr_t p));
1753
1754/* Stubborn objects: */
1755void GC_read_changed GC_PROTO((void)); /* Analogous to GC_read_dirty */
1756GC_bool GC_page_was_changed GC_PROTO((struct hblk * h));
1757 /* Analogous to GC_page_was_dirty */
1758void GC_clean_changing_list GC_PROTO((void));
1759 /* Collect obsolete changing list entries */
1760void GC_stubborn_init GC_PROTO((void));
1761
1762/* Debugging print routines: */
1763void GC_print_block_list GC_PROTO((void));
1764void GC_print_hblkfreelist GC_PROTO((void));
1765void GC_print_heap_sects GC_PROTO((void));
1766void GC_print_static_roots GC_PROTO((void));
1767void GC_print_finalization_stats GC_PROTO((void));
1768void GC_dump GC_PROTO((void));
1769
1770#ifdef KEEP_BACK_PTRS
1771 void GC_store_back_pointer(ptr_t source, ptr_t dest);
1772 void GC_marked_for_finalization(ptr_t dest);
1773# define GC_STORE_BACK_PTR(source, dest) GC_store_back_pointer(source, dest)
1774# define GC_MARKED_FOR_FINALIZATION(dest) GC_marked_for_finalization(dest)
1775#else
1776# define GC_STORE_BACK_PTR(source, dest)
1777# define GC_MARKED_FOR_FINALIZATION(dest)
1778#endif
1779
1780/* Make arguments appear live to compiler */
1781# ifdef __WATCOMC__
1782 void GC_noop(void*, ...);
1783# else
1784# ifdef __DMC__
1785 GC_API void GC_noop(...);
1786# else
1787 GC_API void GC_noop();
1788# endif
1789# endif
1790
1791void GC_noop1 GC_PROTO((word));
1792
1793/* Logging and diagnostic output: */
1794GC_API void GC_printf GC_PROTO((GC_CONST char * format, long, long, long, long, long, long));
1795 /* A version of printf that doesn't allocate, */
1796 /* is restricted to long arguments, and */
1797 /* (unfortunately) doesn't use varargs for */
1798 /* portability. Restricted to 6 args and */
1799 /* 1K total output length. */
1800 /* (We use sprintf. Hopefully that doesn't */
1801 /* allocate for long arguments.) */
1802# define GC_printf0(f) GC_printf(f, 0l, 0l, 0l, 0l, 0l, 0l)
1803# define GC_printf1(f,a) GC_printf(f, (long)a, 0l, 0l, 0l, 0l, 0l)
1804# define GC_printf2(f,a,b) GC_printf(f, (long)a, (long)b, 0l, 0l, 0l, 0l)
1805# define GC_printf3(f,a,b,c) GC_printf(f, (long)a, (long)b, (long)c, 0l, 0l, 0l)
1806# define GC_printf4(f,a,b,c,d) GC_printf(f, (long)a, (long)b, (long)c, \
1807 (long)d, 0l, 0l)
1808# define GC_printf5(f,a,b,c,d,e) GC_printf(f, (long)a, (long)b, (long)c, \
1809 (long)d, (long)e, 0l)
1810# define GC_printf6(f,a,b,c,d,e,g) GC_printf(f, (long)a, (long)b, (long)c, \
1811 (long)d, (long)e, (long)g)
1812
1813GC_API void GC_err_printf GC_PROTO((GC_CONST char * format, long, long, long, long, long, long));
1814# define GC_err_printf0(f) GC_err_puts(f)
1815# define GC_err_printf1(f,a) GC_err_printf(f, (long)a, 0l, 0l, 0l, 0l, 0l)
1816# define GC_err_printf2(f,a,b) GC_err_printf(f, (long)a, (long)b, 0l, 0l, 0l, 0l)
1817# define GC_err_printf3(f,a,b,c) GC_err_printf(f, (long)a, (long)b, (long)c, \
1818 0l, 0l, 0l)
1819# define GC_err_printf4(f,a,b,c,d) GC_err_printf(f, (long)a, (long)b, \
1820 (long)c, (long)d, 0l, 0l)
1821# define GC_err_printf5(f,a,b,c,d,e) GC_err_printf(f, (long)a, (long)b, \
1822 (long)c, (long)d, \
1823 (long)e, 0l)
1824# define GC_err_printf6(f,a,b,c,d,e,g) GC_err_printf(f, (long)a, (long)b, \
1825 (long)c, (long)d, \
1826 (long)e, (long)g)
1827 /* Ditto, writes to stderr. */
1828
1829void GC_err_puts GC_PROTO((GC_CONST char *s));
1830 /* Write s to stderr, don't buffer, don't add */
1831 /* newlines, don't ... */
1832
1833#if defined(LINUX) && !defined(SMALL_CONFIG)
1834 void GC_err_write GC_PROTO((GC_CONST char *buf, size_t len));
1835 /* Write buf to stderr, don't buffer, don't add */
1836 /* newlines, don't ... */
1837#endif
1838
1839
1840# ifdef GC_ASSERTIONS
1841# define GC_ASSERT(expr) if(!(expr)) {\
1842 GC_err_printf2("Assertion failure: %s:%ld\n", \
1843 __FILE__, (unsigned long)__LINE__); \
1844 ABORT("assertion failure"); }
1845# else
1846# define GC_ASSERT(expr)
1847# endif
1848
1849# if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
1850 /* We need additional synchronization facilities from the thread */
1851 /* support. We believe these are less performance critical */
1852 /* than the main garbage collector lock; standard pthreads-based */
1853 /* implementations should be sufficient. */
1854
1855 /* The mark lock and condition variable. If the GC lock is also */
1856 /* acquired, the GC lock must be acquired first. The mark lock is */
1857 /* used to both protect some variables used by the parallel */
1858 /* marker, and to protect GC_fl_builder_count, below. */
1859 /* GC_notify_all_marker() is called when */
1860 /* the state of the parallel marker changes */
1861 /* in some significant way (see gc_mark.h for details). The */
1862 /* latter set of events includes incrementing GC_mark_no. */
1863 /* GC_notify_all_builder() is called when GC_fl_builder_count */
1864 /* reaches 0. */
1865
1866 extern void GC_acquire_mark_lock();
1867 extern void GC_release_mark_lock();
1868 extern void GC_notify_all_builder();
1869 /* extern void GC_wait_builder(); */
1870 extern void GC_wait_for_reclaim();
1871
1872 extern word GC_fl_builder_count; /* Protected by mark lock. */
1873# endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
1874# ifdef PARALLEL_MARK
1875 extern void GC_notify_all_marker();
1876 extern void GC_wait_marker();
1877 extern word GC_mark_no; /* Protected by mark lock. */
1878
1879 extern void GC_help_marker(word my_mark_no);
1880 /* Try to help out parallel marker for mark cycle */
1881 /* my_mark_no. Returns if the mark cycle finishes or */
1882 /* was already done, or there was nothing to do for */
1883 /* some other reason. */
1884# endif /* PARALLEL_MARK */
1885
1886# if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS)
1887 /* We define the thread suspension signal here, so that we can refer */
1888 /* to it in the dirty bit implementation, if necessary. Ideally we */
1889 /* would allocate a (real-time ?) signal using the standard mechanism.*/
1890 /* unfortunately, there is no standard mechanism. (There is one */
1891 /* in Linux glibc, but it's not exported.) Thus we continue to use */
1892 /* the same hard-coded signals we've always used. */
1893# if !defined(SIG_SUSPEND)
1894# if defined(GC_LINUX_THREADS) || defined(GC_DGUX386_THREADS)
1895# if defined(SPARC) && !defined(SIGPWR)
1896 /* SPARC/Linux doesn't properly define SIGPWR in <signal.h>.
1897 * It is aliased to SIGLOST in asm/signal.h, though. */
1898# define SIG_SUSPEND SIGLOST
1899# else
1900 /* Linuxthreads itself uses SIGUSR1 and SIGUSR2. */
1901# define SIG_SUSPEND SIGPWR
1902# endif
1903# else /* !GC_LINUX_THREADS */
1904# if defined(_SIGRTMIN)
1905# define SIG_SUSPEND _SIGRTMIN + 6
1906# else
1907# define SIG_SUSPEND SIGRTMIN + 6
1908# endif
1909# endif
1910# endif /* !SIG_SUSPEND */
1911
1912# endif
1913
1914# endif /* GC_PRIVATE_H */
diff --git a/gc/include/private/gcconfig.h b/gc/include/private/gcconfig.h
deleted file mode 100644
index 2290d3d10ea..00000000000
--- a/gc/include/private/gcconfig.h
+++ /dev/null
@@ -1,2085 +0,0 @@
1/*
2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 2000 by Hewlett-Packard Company. All rights reserved.
6 *
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 *
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
15 */
16
17/*
18 * This header is private to the gc. It is almost always included from
19 * gc_priv.h. However it is possible to include it by itself if just the
20 * configuration macros are needed. In that
21 * case, a few declarations relying on types declared in gc_priv.h will be
22 * omitted.
23 */
24
25#ifndef GCCONFIG_H
26
27# define GCCONFIG_H
28
29/* Machine dependent parameters. Some tuning parameters can be found */
30/* near the top of gc_private.h. */
31
32/* Machine specific parts contributed by various people. See README file. */
33
34/* First a unified test for Linux: */
35# if defined(linux) || defined(__linux__)
36# define LINUX
37# endif
38
39/* And one for NetBSD: */
40# if defined(__NetBSD__)
41# define NETBSD
42# endif
43
44/* And one for OpenBSD: */
45# if defined(__OpenBSD__)
46# define OPENBSD
47# endif
48
49/* And one for FreeBSD: */
50# if defined(__FreeBSD__)
51# define FREEBSD
52# endif
53
54/* Determine the machine type: */
55# if defined(__XSCALE__)
56# define ARM32
57# if !defined(LINUX)
58# define NOSYS
59# define mach_type_known
60# endif
61# endif
62# if defined(sun) && defined(mc68000)
63# define M68K
64# define SUNOS4
65# define mach_type_known
66# endif
67# if defined(hp9000s300)
68# define M68K
69# define HP
70# define mach_type_known
71# endif
72# if defined(OPENBSD) && defined(m68k)
73# define M68K
74# define mach_type_known
75# endif
76# if defined(OPENBSD) && defined(__sparc__)
77# define SPARC
78# define mach_type_known
79# endif
80# if defined(NETBSD) && defined(m68k)
81# define M68K
82# define mach_type_known
83# endif
84# if defined(NETBSD) && defined(__powerpc__)
85# define POWERPC
86# define mach_type_known
87# endif
88# if defined(NETBSD) && defined(__arm32__)
89# define ARM32
90# define mach_type_known
91# endif
92# if defined(vax)
93# define VAX
94# ifdef ultrix
95# define ULTRIX
96# else
97# define BSD
98# endif
99# define mach_type_known
100# endif
101# if defined(mips) || defined(__mips) || defined(_mips)
102# define MIPS
103# if defined(nec_ews) || defined(_nec_ews)
104# define EWS4800
105# endif
106# if !defined(LINUX) && !defined(EWS4800)
107# if defined(ultrix) || defined(__ultrix) || defined(__NetBSD__)
108# define ULTRIX
109# else
110# if defined(_SYSTYPE_SVR4) || defined(SYSTYPE_SVR4) \
111 || defined(__SYSTYPE_SVR4__)
112# define IRIX5 /* or IRIX 6.X */
113# else
114# define RISCOS /* or IRIX 4.X */
115# endif
116# endif
117# endif /* !LINUX */
118# if defined(__NetBSD__) && defined(__MIPSEL__)
119# undef ULTRIX
120# endif
121# define mach_type_known
122# endif
123# if defined(DGUX) && (defined(i386) || defined(__i386__))
124# define I386
125# ifndef _USING_DGUX
126# define _USING_DGUX
127# endif
128# define mach_type_known
129# endif
130# if defined(sequent) && (defined(i386) || defined(__i386__))
131# define I386
132# define SEQUENT
133# define mach_type_known
134# endif
135# if defined(sun) && (defined(i386) || defined(__i386__))
136# define I386
137# define SUNOS5
138# define mach_type_known
139# endif
140# if (defined(__OS2__) || defined(__EMX__)) && defined(__32BIT__)
141# define I386
142# define OS2
143# define mach_type_known
144# endif
145# if defined(ibm032)
146# define RT
147# define mach_type_known
148# endif
149# if defined(sun) && (defined(sparc) || defined(__sparc))
150# define SPARC
151 /* Test for SunOS 5.x */
152# include <errno.h>
153# ifdef ECHRNG
154# define SUNOS5
155# else
156# define SUNOS4
157# endif
158# define mach_type_known
159# endif
160# if defined(sparc) && defined(unix) && !defined(sun) && !defined(linux) \
161 && !defined(__OpenBSD__) && !(__NetBSD__)
162# define SPARC
163# define DRSNX
164# define mach_type_known
165# endif
166# if defined(_IBMR2)
167# define RS6000
168# define mach_type_known
169# endif
170# if defined(__NetBSD__) && defined(__sparc__)
171# define SPARC
172# define mach_type_known
173# endif
174# if defined(_M_XENIX) && defined(_M_SYSV) && defined(_M_I386)
175 /* The above test may need refinement */
176# define I386
177# if defined(_SCO_ELF)
178# define SCO_ELF
179# else
180# define SCO
181# endif
182# define mach_type_known
183# endif
184# if defined(_AUX_SOURCE)
185# define M68K
186# define SYSV
187# define mach_type_known
188# endif
189# if defined(_PA_RISC1_0) || defined(_PA_RISC1_1) || defined(_PA_RISC2_0) \
190 || defined(hppa) || defined(__hppa__)
191# define HP_PA
192# ifndef LINUX
193# define HPUX
194# endif
195# define mach_type_known
196# endif
197# if defined(__ia64) && defined(_HPUX_SOURCE)
198# define IA64
199# define HPUX
200# define mach_type_known
201# endif
202# if defined(__BEOS__) && defined(_X86_)
203# define I386
204# define BEOS
205# define mach_type_known
206# endif
207# if defined(LINUX) && (defined(i386) || defined(__i386__))
208# define I386
209# define mach_type_known
210# endif
211# if defined(LINUX) && defined(__x86_64__)
212# define X86_64
213# define mach_type_known
214# endif
215# if defined(LINUX) && (defined(__ia64__) || defined(__ia64))
216# define IA64
217# define mach_type_known
218# endif
219# if defined(LINUX) && defined(__arm__)
220# define ARM32
221# define mach_type_known
222# endif
223# if defined(LINUX) && (defined(powerpc) || defined(__powerpc__))
224# define POWERPC
225# define mach_type_known
226# endif
227# if defined(LINUX) && defined(__mc68000__)
228# define M68K
229# define mach_type_known
230# endif
231# if defined(LINUX) && (defined(sparc) || defined(__sparc__))
232# define SPARC
233# define mach_type_known
234# endif
235# if defined(LINUX) && defined(__arm__)
236# define ARM32
237# define mach_type_known
238# endif
239# if defined(LINUX) && defined(__sh__)
240# define SH
241# define mach_type_known
242# endif
243# if defined(__alpha) || defined(__alpha__)
244# define ALPHA
245# if !defined(LINUX) && !defined(NETBSD) && !defined(OPENBSD) && !defined(FREEBSD)
246# define OSF1 /* a.k.a Digital Unix */
247# endif
248# define mach_type_known
249# endif
250# if defined(_AMIGA) && !defined(AMIGA)
251# define AMIGA
252# endif
253# ifdef AMIGA
254# define M68K
255# define mach_type_known
256# endif
257# if defined(THINK_C) || defined(__MWERKS__) && !defined(__powerc)
258# define M68K
259# define MACOS
260# define mach_type_known
261# endif
262# if defined(__MWERKS__) && defined(__powerc) && !defined(__MACH__)
263# define POWERPC
264# define MACOS
265# define mach_type_known
266# endif
267# if defined(macosx) || \
268 defined(__APPLE__) && defined(__MACH__) && defined(__ppc__)
269# define MACOSX
270# define POWERPC
271# define mach_type_known
272# endif
273# if defined(__APPLE__) && defined(__MACH__) && defined(__i386__)
274# define MACOSX
275# define I386
276 --> Not really supported, but at least we recognize it.
277# endif
278# if defined(NeXT) && defined(mc68000)
279# define M68K
280# define NEXT
281# define mach_type_known
282# endif
283# if defined(NeXT) && (defined(i386) || defined(__i386__))
284# define I386
285# define NEXT
286# define mach_type_known
287# endif
288# if defined(__OpenBSD__) && (defined(i386) || defined(__i386__))
289# define I386
290# define OPENBSD
291# define mach_type_known
292# endif
293# if defined(FREEBSD) && (defined(i386) || defined(__i386__))
294# define I386
295# define mach_type_known
296# endif
297# if defined(__NetBSD__) && (defined(i386) || defined(__i386__))
298# define I386
299# define mach_type_known
300# endif
301# if defined(bsdi) && (defined(i386) || defined(__i386__))
302# define I386
303# define BSDI
304# define mach_type_known
305# endif
306# if !defined(mach_type_known) && defined(__386BSD__)
307# define I386
308# define THREE86BSD
309# define mach_type_known
310# endif
311# if defined(_CX_UX) && defined(_M88K)
312# define M88K
313# define CX_UX
314# define mach_type_known
315# endif
316# if defined(DGUX) && defined(m88k)
317# define M88K
318 /* DGUX defined */
319# define mach_type_known
320# endif
321# if defined(_WIN32_WCE)
322 /* SH3, SH4, MIPS already defined for corresponding architectures */
323# if defined(SH3) || defined(SH4)
324# define SH
325# endif
326# if defined(x86)
327# define I386
328# endif
329# if defined(ARM)
330# define ARM32
331# endif
332# define MSWINCE
333# define mach_type_known
334# else
335# if (defined(_MSDOS) || defined(_MSC_VER)) && (_M_IX86 >= 300) \
336 || defined(_WIN32) && !defined(__CYGWIN32__) && !defined(__CYGWIN__)
337# define I386
338# define MSWIN32 /* or Win32s */
339# define mach_type_known
340# endif
341# endif
342# if defined(__DJGPP__)
343# define I386
344# ifndef DJGPP
345# define DJGPP /* MSDOS running the DJGPP port of GCC */
346# endif
347# define mach_type_known
348# endif
349# if defined(__CYGWIN32__) || defined(__CYGWIN__)
350# define I386
351# define CYGWIN32
352# define mach_type_known
353# endif
354# if defined(__MINGW32__)
355# define I386
356# define MSWIN32
357# define mach_type_known
358# endif
359# if defined(__BORLANDC__)
360# define I386
361# define MSWIN32
362# define mach_type_known
363# endif
364# if defined(_UTS) && !defined(mach_type_known)
365# define S370
366# define UTS4
367# define mach_type_known
368# endif
369# if defined(__pj__)
370# define PJ
371# define mach_type_known
372# endif
373# if defined(__embedded__) && defined(PPC)
374# define POWERPC
375# define NOSYS
376# define mach_type_known
377# endif
378/* Ivan Demakov */
379# if defined(__WATCOMC__) && defined(__386__)
380# define I386
381# if !defined(OS2) && !defined(MSWIN32) && !defined(DOS4GW)
382# if defined(__OS2__)
383# define OS2
384# else
385# if defined(__WINDOWS_386__) || defined(__NT__)
386# define MSWIN32
387# else
388# define DOS4GW
389# endif
390# endif
391# endif
392# define mach_type_known
393# endif
394# if defined(__s390__) && defined(LINUX)
395# define S390
396# define mach_type_known
397# endif
398# if defined(__GNU__)
399# if defined(__i386__)
400/* The Debian Hurd running on generic PC */
401# define HURD
402# define I386
403# define mach_type_known
404# endif
405# endif
406
407/* Feel free to add more clauses here */
408
409/* Or manually define the machine type here. A machine type is */
410/* characterized by the architecture. Some */
411/* machine types are further subdivided by OS. */
412/* the macros ULTRIX, RISCOS, and BSD to distinguish. */
413/* Note that SGI IRIX is treated identically to RISCOS. */
414/* SYSV on an M68K actually means A/UX. */
415/* The distinction in these cases is usually the stack starting address */
416# ifndef mach_type_known
417 --> unknown machine type
418# endif
419 /* Mapping is: M68K ==> Motorola 680X0 */
420 /* (SUNOS4,HP,NEXT, and SYSV (A/UX), */
421 /* MACOS and AMIGA variants) */
422 /* I386 ==> Intel 386 */
423 /* (SEQUENT, OS2, SCO, LINUX, NETBSD, */
424 /* FREEBSD, THREE86BSD, MSWIN32, */
425 /* BSDI,SUNOS5, NEXT, other variants) */
426 /* NS32K ==> Encore Multimax */
427 /* MIPS ==> R2000 or R3000 */
428 /* (RISCOS, ULTRIX variants) */
429 /* VAX ==> DEC VAX */
430 /* (BSD, ULTRIX variants) */
431 /* RS6000 ==> IBM RS/6000 AIX3.X */
432 /* RT ==> IBM PC/RT */
433 /* HP_PA ==> HP9000/700 & /800 */
434 /* HP/UX, LINUX */
435 /* SPARC ==> SPARC v7/v8/v9 */
436 /* (SUNOS4, SUNOS5, LINUX, */
437 /* DRSNX variants) */
438 /* ALPHA ==> DEC Alpha */
439 /* (OSF1 and LINUX variants) */
440 /* M88K ==> Motorola 88XX0 */
441 /* (CX_UX and DGUX) */
442 /* S370 ==> 370-like machine */
443 /* running Amdahl UTS4 */
444 /* S390 ==> 390-like machine */
445 /* running LINUX */
446 /* ARM32 ==> Intel StrongARM */
447 /* IA64 ==> Intel IPF */
448 /* (e.g. Itanium) */
449 /* (LINUX and HPUX) */
450 /* SH ==> Hitachi SuperH */
451 /* (LINUX & MSWINCE) */
452 /* X86_64 ==> AMD x86-64 */
453
454
455/*
456 * For each architecture and OS, the following need to be defined:
457 *
458 * CPP_WORD_SZ is a simple integer constant representing the word size.
459 * in bits. We assume byte addressibility, where a byte has 8 bits.
460 * We also assume CPP_WORD_SZ is either 32 or 64.
461 * (We care about the length of pointers, not hardware
462 * bus widths. Thus a 64 bit processor with a C compiler that uses
463 * 32 bit pointers should use CPP_WORD_SZ of 32, not 64. Default is 32.)
464 *
465 * MACH_TYPE is a string representation of the machine type.
466 * OS_TYPE is analogous for the OS.
467 *
468 * ALIGNMENT is the largest N, such that
469 * all pointer are guaranteed to be aligned on N byte boundaries.
470 * defining it to be 1 will always work, but perform poorly.
471 *
472 * DATASTART is the beginning of the data segment.
473 * On some platforms SEARCH_FOR_DATA_START is defined.
474 * SEARCH_FOR_DATASTART will cause GC_data_start to
475 * be set to an address determined by accessing data backwards from _end
476 * until an unmapped page is found. DATASTART will be defined to be
477 * GC_data_start.
478 * On UNIX-like systems, the collector will scan the area between DATASTART
479 * and DATAEND for root pointers.
480 *
481 * DATAEND, if not `end' where `end' is defined as ``extern int end[];''.
482 * RTH suggests gaining access to linker script synth'd values with
483 * this idiom instead of `&end' where `end' is defined as ``extern int end;'' .
484 *
485 * ALIGN_DOUBLE of GC_malloc should return blocks aligned to twice
486 * the pointer size.
487 *
488 * STACKBOTTOM is the cool end of the stack, which is usually the
489 * highest address in the stack.
490 * Under PCR or OS/2, we have other ways of finding thread stacks.
491 * For each machine, the following should:
492 * 1) define STACK_GROWS_UP if the stack grows toward higher addresses, and
493 * 2) define exactly one of
494 * STACKBOTTOM (should be defined to be an expression)
495 * LINUX_STACKBOTTOM
496 * HEURISTIC1
497 * HEURISTIC2
498 * If STACKBOTTOM is defined, then it's value will be used directly as the
499 * stack base. If LINUX_STACKBOTTOM is defined, then it will be determined
500 * with a method appropriate for most Linux systems. Currently we look
501 * first for __libc_stack_end, and if that fails read it from /proc.
502 * If either of the last two macros are defined, then STACKBOTTOM is computed
503 * during collector startup using one of the following two heuristics:
504 * HEURISTIC1: Take an address inside GC_init's frame, and round it up to
505 * the next multiple of STACK_GRAN.
506 * HEURISTIC2: Take an address inside GC_init's frame, increment it repeatedly
507 * in small steps (decrement if STACK_GROWS_UP), and read the value
508 * at each location. Remember the value when the first
509 * Segmentation violation or Bus error is signalled. Round that
510 * to the nearest plausible page boundary, and use that instead
511 * of STACKBOTTOM.
512 *
513 * Gustavo Rodriguez-Rivera points out that on most (all?) Unix machines,
514 * the value of environ is a pointer that can serve as STACKBOTTOM.
515 * I expect that HEURISTIC2 can be replaced by this approach, which
516 * interferes far less with debugging. However it has the disadvantage
517 * that it's confused by a putenv call before the collector is initialized.
518 * This could be dealt with by intercepting putenv ...
519 *
520 * If no expression for STACKBOTTOM can be found, and neither of the above
521 * heuristics are usable, the collector can still be used with all of the above
522 * undefined, provided one of the following is done:
523 * 1) GC_mark_roots can be changed to somehow mark from the correct stack(s)
524 * without reference to STACKBOTTOM. This is appropriate for use in
525 * conjunction with thread packages, since there will be multiple stacks.
526 * (Allocating thread stacks in the heap, and treating them as ordinary
527 * heap data objects is also possible as a last resort. However, this is
528 * likely to introduce significant amounts of excess storage retention
529 * unless the dead parts of the thread stacks are periodically cleared.)
530 * 2) Client code may set GC_stackbottom before calling any GC_ routines.
531 * If the author of the client code controls the main program, this is
532 * easily accomplished by introducing a new main program, setting
533 * GC_stackbottom to the address of a local variable, and then calling
534 * the original main program. The new main program would read something
535 * like:
536 *
537 * # include "gc_private.h"
538 *
539 * main(argc, argv, envp)
540 * int argc;
541 * char **argv, **envp;
542 * {
543 * int dummy;
544 *
545 * GC_stackbottom = (ptr_t)(&dummy);
546 * return(real_main(argc, argv, envp));
547 * }
548 *
549 *
550 * Each architecture may also define the style of virtual dirty bit
551 * implementation to be used:
552 * MPROTECT_VDB: Write protect the heap and catch faults.
553 * PROC_VDB: Use the SVR4 /proc primitives to read dirty bits.
554 *
555 * An architecture may define DYNAMIC_LOADING if dynamic_load.c
556 * defined GC_register_dynamic_libraries() for the architecture.
557 *
558 * An architecture may define PREFETCH(x) to preload the cache with *x.
559 * This defaults to a no-op.
560 *
561 * PREFETCH_FOR_WRITE(x) is used if *x is about to be written.
562 *
563 * An architecture may also define CLEAR_DOUBLE(x) to be a fast way to
564 * clear the two words at GC_malloc-aligned address x. By default,
565 * word stores of 0 are used instead.
566 *
567 * HEAP_START may be defined as the initial address hint for mmap-based
568 * allocation.
569 */
570
571/* If we are using a recent version of gcc, we can use __builtin_unwind_init()
572 * to push the relevant registers onto the stack. This generally makes
573 * USE_GENERIC_PUSH_REGS the preferred approach for marking from registers.
574 */
575# if defined(__GNUC__) && ((__GNUC__ >= 3) || \
576 (__GNUC__ == 2 && __GNUC_MINOR__ >= 8))
577# define HAVE_BUILTIN_UNWIND_INIT
578# endif
579
580# define STACK_GRAN 0x1000000
581# ifdef M68K
582# define MACH_TYPE "M68K"
583# define ALIGNMENT 2
584# ifdef OPENBSD
585# define OS_TYPE "OPENBSD"
586# define HEURISTIC2
587 extern char etext[];
588# define DATASTART ((ptr_t)(etext))
589# endif
590# ifdef NETBSD
591# define OS_TYPE "NETBSD"
592# define HEURISTIC2
593 extern char etext[];
594# define DATASTART ((ptr_t)(etext))
595# endif
596# ifdef LINUX
597# define OS_TYPE "LINUX"
598# define STACKBOTTOM ((ptr_t)0xf0000000)
599/* # define MPROTECT_VDB - Reported to not work 9/17/01 */
600# ifdef __ELF__
601# define DYNAMIC_LOADING
602# include <features.h>
603# if defined(__GLIBC__)&& __GLIBC__>=2
604# define SEARCH_FOR_DATA_START
605# else /* !GLIBC2 */
606 extern char **__environ;
607# define DATASTART ((ptr_t)(&__environ))
608 /* hideous kludge: __environ is the first */
609 /* word in crt0.o, and delimits the start */
610 /* of the data segment, no matter which */
611 /* ld options were passed through. */
612 /* We could use _etext instead, but that */
613 /* would include .rodata, which may */
614 /* contain large read-only data tables */
615 /* that we'd rather not scan. */
616# endif /* !GLIBC2 */
617 extern int _end[];
618# define DATAEND (_end)
619# else
620 extern int etext[];
621# define DATASTART ((ptr_t)((((word) (etext)) + 0xfff) & ~0xfff))
622# endif
623# endif
624# ifdef SUNOS4
625# define OS_TYPE "SUNOS4"
626 extern char etext[];
627# define DATASTART ((ptr_t)((((word) (etext)) + 0x1ffff) & ~0x1ffff))
628# define HEURISTIC1 /* differs */
629# define DYNAMIC_LOADING
630# endif
631# ifdef HP
632# define OS_TYPE "HP"
633 extern char etext[];
634# define DATASTART ((ptr_t)((((word) (etext)) + 0xfff) & ~0xfff))
635# define STACKBOTTOM ((ptr_t) 0xffeffffc)
636 /* empirically determined. seems to work. */
637# include <unistd.h>
638# define GETPAGESIZE() sysconf(_SC_PAGE_SIZE)
639# endif
640# ifdef SYSV
641# define OS_TYPE "SYSV"
642 extern etext[];
643# define DATASTART ((ptr_t)((((word) (etext)) + 0x3fffff) \
644 & ~0x3fffff) \
645 +((word)etext & 0x1fff))
646 /* This only works for shared-text binaries with magic number 0413.
647 The other sorts of SysV binaries put the data at the end of the text,
648 in which case the default of etext would work. Unfortunately,
649 handling both would require having the magic-number available.
650 -- Parag
651 */
652# define STACKBOTTOM ((ptr_t)0xFFFFFFFE)
653 /* The stack starts at the top of memory, but */
654 /* 0x0 cannot be used as setjump_test complains */
655 /* that the stack direction is incorrect. Two */
656 /* bytes down from 0x0 should be safe enough. */
657 /* --Parag */
658# include <sys/mmu.h>
659# define GETPAGESIZE() PAGESIZE /* Is this still right? */
660# endif
661# ifdef AMIGA
662# define OS_TYPE "AMIGA"
663 /* STACKBOTTOM and DATASTART handled specially */
664 /* in os_dep.c */
665# define DATAEND /* not needed */
666# define GETPAGESIZE() 4096
667# endif
668# ifdef MACOS
669# ifndef __LOWMEM__
670# include <LowMem.h>
671# endif
672# define OS_TYPE "MACOS"
673 /* see os_dep.c for details of global data segments. */
674# define STACKBOTTOM ((ptr_t) LMGetCurStackBase())
675# define DATAEND /* not needed */
676# define GETPAGESIZE() 4096
677# endif
678# ifdef NEXT
679# define OS_TYPE "NEXT"
680# define DATASTART ((ptr_t) get_etext())
681# define STACKBOTTOM ((ptr_t) 0x4000000)
682# define DATAEND /* not needed */
683# endif
684# endif
685
686# ifdef POWERPC
687# define MACH_TYPE "POWERPC"
688# ifdef MACOS
689# define ALIGNMENT 2 /* Still necessary? Could it be 4? */
690# ifndef __LOWMEM__
691# include <LowMem.h>
692# endif
693# define OS_TYPE "MACOS"
694 /* see os_dep.c for details of global data segments. */
695# define STACKBOTTOM ((ptr_t) LMGetCurStackBase())
696# define DATAEND /* not needed */
697# endif
698# ifdef LINUX
699# define ALIGNMENT 4 /* Guess. Can someone verify? */
700 /* This was 2, but that didn't sound right. */
701# define OS_TYPE "LINUX"
702 /* HEURISTIC1 has been reliably reported to fail for a 32-bit */
703 /* executable on a 64 bit kernel. */
704# define LINUX_STACKBOTTOM
705# define DYNAMIC_LOADING
706# undef STACK_GRAN
707# define STACK_GRAN 0x10000000
708 /* Stack usually starts at 0x80000000 */
709# define SEARCH_FOR_DATA_START
710 extern int _end[];
711# define DATAEND (_end)
712# endif
713# ifdef MACOSX
714 /* There are reasons to suspect this may not be reliable. */
715# define ALIGNMENT 4
716# define OS_TYPE "MACOSX"
717# ifdef GC_MACOSX_THREADS
718# define SIG_SUSPEND SIGXCPU
719# define SIG_THR_RESTART SIGXFSZ
720# endif
721# define DYNAMIC_LOADING
722 /* XXX: see get_end(3), get_etext() and get_end() should not be used */
723# define DATASTART ((ptr_t) get_etext())
724# define STACKBOTTOM ((ptr_t) 0xc0000000)
725# define DATAEND ((ptr_t) get_end())
726# define USE_MMAP
727# define USE_MMAP_ANON
728/* # define MPROTECT_VDB -- There is some evidence that this breaks
729 * on some minor versions of MACOSX, i.e. 10.2.3. In theory,
730 * it should be OK */
731# include <unistd.h>
732# define GETPAGESIZE() getpagesize()
733# if defined(USE_PPC_PREFETCH) && defined(__GNUC__)
734 /* The performance impact of prefetches is untested */
735# define PREFETCH(x) \
736 __asm__ __volatile__ ("dcbt 0,%0" : : "r" ((const void *) (x)))
737# define PREFETCH_FOR_WRITE(x) \
738 __asm__ __volatile__ ("dcbtst 0,%0" : : "r" ((const void *) (x)))
739# endif
740# endif
741# ifdef NETBSD
742# define ALIGNMENT 4
743# define OS_TYPE "NETBSD"
744# define HEURISTIC2
745 extern char etext[];
746# define DATASTART GC_data_start
747# define DYNAMIC_LOADING
748# endif
749# ifdef NOSYS
750# define ALIGNMENT 4
751# define OS_TYPE "NOSYS"
752 extern void __end[], __dso_handle[];
753# define DATASTART (__dso_handle) /* OK, that's ugly. */
754# define DATAEND (__end)
755 /* Stack starts at 0xE0000000 for the simulator. */
756# undef STACK_GRAN
757# define STACK_GRAN 0x10000000
758# define HEURISTIC1
759# endif
760# endif
761
762# ifdef VAX
763# define MACH_TYPE "VAX"
764# define ALIGNMENT 4 /* Pointers are longword aligned by 4.2 C compiler */
765 extern char etext[];
766# define DATASTART ((ptr_t)(etext))
767# ifdef BSD
768# define OS_TYPE "BSD"
769# define HEURISTIC1
770 /* HEURISTIC2 may be OK, but it's hard to test. */
771# endif
772# ifdef ULTRIX
773# define OS_TYPE "ULTRIX"
774# define STACKBOTTOM ((ptr_t) 0x7fffc800)
775# endif
776# endif
777
778# ifdef RT
779# define MACH_TYPE "RT"
780# define ALIGNMENT 4
781# define DATASTART ((ptr_t) 0x10000000)
782# define STACKBOTTOM ((ptr_t) 0x1fffd800)
783# endif
784
785# ifdef SPARC
786# define MACH_TYPE "SPARC"
787# if defined(__arch64__) || defined(__sparcv9)
788# define ALIGNMENT 8
789# define CPP_WORDSZ 64
790# define ELF_CLASS ELFCLASS64
791# else
792# define ALIGNMENT 4 /* Required by hardware */
793# define CPP_WORDSZ 32
794# endif
795# define ALIGN_DOUBLE
796# ifdef SUNOS5
797# define OS_TYPE "SUNOS5"
798 extern int _etext[];
799 extern int _end[];
800 extern ptr_t GC_SysVGetDataStart();
801# define DATASTART GC_SysVGetDataStart(0x10000, _etext)
802# define DATAEND (_end)
803# if !defined(USE_MMAP) && defined(REDIRECT_MALLOC)
804# define USE_MMAP
805 /* Otherwise we now use calloc. Mmap may result in the */
806 /* heap interleaved with thread stacks, which can result in */
807 /* excessive blacklisting. Sbrk is unusable since it */
808 /* doesn't interact correctly with the system malloc. */
809# endif
810# ifdef USE_MMAP
811# define HEAP_START (ptr_t)0x40000000
812# else
813# define HEAP_START DATAEND
814# endif
815# define PROC_VDB
816/* HEURISTIC1 reportedly no longer works under 2.7. */
817/* HEURISTIC2 probably works, but this appears to be preferable. */
818/* Apparently USRSTACK is defined to be USERLIMIT, but in some */
819/* installations that's undefined. We work around this with a */
820/* gross hack: */
821# include <sys/vmparam.h>
822# ifdef USERLIMIT
823 /* This should work everywhere, but doesn't. */
824# define STACKBOTTOM USRSTACK
825# else
826# define HEURISTIC2
827# endif
828# include <unistd.h>
829# define GETPAGESIZE() sysconf(_SC_PAGESIZE)
830 /* getpagesize() appeared to be missing from at least one */
831 /* Solaris 5.4 installation. Weird. */
832# define DYNAMIC_LOADING
833# endif
834# ifdef SUNOS4
835# define OS_TYPE "SUNOS4"
836 /* [If you have a weak stomach, don't read this.] */
837 /* We would like to use: */
838/* # define DATASTART ((ptr_t)((((word) (etext)) + 0x1fff) & ~0x1fff)) */
839 /* This fails occasionally, due to an ancient, but very */
840 /* persistent ld bug. etext is set 32 bytes too high. */
841 /* We instead read the text segment size from the a.out */
842 /* header, which happens to be mapped into our address space */
843 /* at the start of the text segment. The detective work here */
844 /* was done by Robert Ehrlich, Manuel Serrano, and Bernard */
845 /* Serpette of INRIA. */
846 /* This assumes ZMAGIC, i.e. demand-loadable executables. */
847# define TEXTSTART 0x2000
848# define DATASTART ((ptr_t)(*(int *)(TEXTSTART+0x4)+TEXTSTART))
849# define MPROTECT_VDB
850# define HEURISTIC1
851# define DYNAMIC_LOADING
852# endif
853# ifdef DRSNX
854# define OS_TYPE "DRSNX"
855 extern ptr_t GC_SysVGetDataStart();
856 extern int etext[];
857# define DATASTART GC_SysVGetDataStart(0x10000, etext)
858# define MPROTECT_VDB
859# define STACKBOTTOM ((ptr_t) 0xdfff0000)
860# define DYNAMIC_LOADING
861# endif
862# ifdef LINUX
863# define OS_TYPE "LINUX"
864# ifdef __ELF__
865# define DYNAMIC_LOADING
866# else
867 Linux Sparc/a.out not supported
868# endif
869 extern int _end[];
870 extern int _etext[];
871# define DATAEND (_end)
872# define SVR4
873 extern ptr_t GC_SysVGetDataStart();
874# ifdef __arch64__
875# define DATASTART GC_SysVGetDataStart(0x100000, _etext)
876 /* libc_stack_end is not set reliably for sparc64 */
877# define STACKBOTTOM ((ptr_t) 0x80000000000ULL)
878# else
879# define DATASTART GC_SysVGetDataStart(0x10000, _etext)
880# define LINUX_STACKBOTTOM
881# endif
882# endif
883# ifdef OPENBSD
884# define OS_TYPE "OPENBSD"
885# define STACKBOTTOM ((ptr_t) 0xf8000000)
886 extern int etext[];
887# define DATASTART ((ptr_t)(etext))
888# endif
889# ifdef NETBSD
890# define OS_TYPE "NETBSD"
891# define HEURISTIC2
892# ifdef __ELF__
893# define DATASTART GC_data_start
894# define DYNAMIC_LOADING
895# else
896 extern char etext[];
897# define DATASTART ((ptr_t)(etext))
898# endif
899# endif
900# endif
901
902# ifdef I386
903# define MACH_TYPE "I386"
904# define ALIGNMENT 4 /* Appears to hold for all "32 bit" compilers */
905 /* except Borland. The -a4 option fixes */
906 /* Borland. */
907 /* Ivan Demakov: For Watcom the option is -zp4. */
908# ifndef SMALL_CONFIG
909# define ALIGN_DOUBLE /* Not strictly necessary, but may give speed */
910 /* improvement on Pentiums. */
911# endif
912# ifdef HAVE_BUILTIN_UNWIND_INIT
913# define USE_GENERIC_PUSH_REGS
914# endif
915# ifdef SEQUENT
916# define OS_TYPE "SEQUENT"
917 extern int etext[];
918# define DATASTART ((ptr_t)((((word) (etext)) + 0xfff) & ~0xfff))
919# define STACKBOTTOM ((ptr_t) 0x3ffff000)
920# endif
921# ifdef BEOS
922# define OS_TYPE "BEOS"
923# include <OS.h>
924# define GETPAGESIZE() B_PAGE_SIZE
925 extern int etext[];
926# define DATASTART ((ptr_t)((((word) (etext)) + 0xfff) & ~0xfff))
927# endif
928# ifdef SUNOS5
929# define OS_TYPE "SUNOS5"
930 extern int _etext[], _end[];
931 extern ptr_t GC_SysVGetDataStart();
932# define DATASTART GC_SysVGetDataStart(0x1000, _etext)
933# define DATAEND (_end)
934/* # define STACKBOTTOM ((ptr_t)(_start)) worked through 2.7, */
935/* but reportedly breaks under 2.8. It appears that the stack */
936/* base is a property of the executable, so this should not break */
937/* old executables. */
938/* HEURISTIC2 probably works, but this appears to be preferable. */
939# include <sys/vm.h>
940# define STACKBOTTOM USRSTACK
941/* At least in Solaris 2.5, PROC_VDB gives wrong values for dirty bits. */
942/* It appears to be fixed in 2.8 and 2.9. */
943# ifdef SOLARIS25_PROC_VDB_BUG_FIXED
944# define PROC_VDB
945# endif
946# define DYNAMIC_LOADING
947# if !defined(USE_MMAP) && defined(REDIRECT_MALLOC)
948# define USE_MMAP
949 /* Otherwise we now use calloc. Mmap may result in the */
950 /* heap interleaved with thread stacks, which can result in */
951 /* excessive blacklisting. Sbrk is unusable since it */
952 /* doesn't interact correctly with the system malloc. */
953# endif
954# ifdef USE_MMAP
955# define HEAP_START (ptr_t)0x40000000
956# else
957# define HEAP_START DATAEND
958# endif
959# endif
960# ifdef SCO
961# define OS_TYPE "SCO"
962 extern int etext[];
963# define DATASTART ((ptr_t)((((word) (etext)) + 0x3fffff) \
964 & ~0x3fffff) \
965 +((word)etext & 0xfff))
966# define STACKBOTTOM ((ptr_t) 0x7ffffffc)
967# endif
968# ifdef SCO_ELF
969# define OS_TYPE "SCO_ELF"
970 extern int etext[];
971# define DATASTART ((ptr_t)(etext))
972# define STACKBOTTOM ((ptr_t) 0x08048000)
973# define DYNAMIC_LOADING
974# define ELF_CLASS ELFCLASS32
975# endif
976# ifdef DGUX
977# define OS_TYPE "DGUX"
978 extern int _etext, _end;
979 extern ptr_t GC_SysVGetDataStart();
980# define DATASTART GC_SysVGetDataStart(0x1000, &_etext)
981# define DATAEND (&_end)
982# define STACK_GROWS_DOWN
983# define HEURISTIC2
984# include <unistd.h>
985# define GETPAGESIZE() sysconf(_SC_PAGESIZE)
986# define DYNAMIC_LOADING
987# ifndef USE_MMAP
988# define USE_MMAP
989# endif /* USE_MMAP */
990# define MAP_FAILED (void *) -1
991# ifdef USE_MMAP
992# define HEAP_START (ptr_t)0x40000000
993# else /* USE_MMAP */
994# define HEAP_START DATAEND
995# endif /* USE_MMAP */
996# endif /* DGUX */
997
998# ifdef LINUX
999# ifndef __GNUC__
1000 /* The Intel compiler doesn't like inline assembly */
1001# define USE_GENERIC_PUSH_REGS
1002# endif
1003# define OS_TYPE "LINUX"
1004# define LINUX_STACKBOTTOM
1005# if 0
1006# define HEURISTIC1
1007# undef STACK_GRAN
1008# define STACK_GRAN 0x10000000
1009 /* STACKBOTTOM is usually 0xc0000000, but this changes with */
1010 /* different kernel configurations. In particular, systems */
1011 /* with 2GB physical memory will usually move the user */
1012 /* address space limit, and hence initial SP to 0x80000000. */
1013# endif
1014# if !defined(GC_LINUX_THREADS) || !defined(REDIRECT_MALLOC)
1015# define MPROTECT_VDB
1016# else
1017 /* We seem to get random errors in incremental mode, */
1018 /* possibly because Linux threads is itself a malloc client */
1019 /* and can't deal with the signals. */
1020# endif
1021# define HEAP_START 0x1000
1022 /* This encourages mmap to give us low addresses, */
1023 /* thus allowing the heap to grow to ~3GB */
1024# ifdef __ELF__
1025# define DYNAMIC_LOADING
1026# ifdef UNDEFINED /* includes ro data */
1027 extern int _etext[];
1028# define DATASTART ((ptr_t)((((word) (_etext)) + 0xfff) & ~0xfff))
1029# endif
1030# include <features.h>
1031# if defined(__GLIBC__) && __GLIBC__ >= 2
1032# define SEARCH_FOR_DATA_START
1033# else
1034 extern char **__environ;
1035# define DATASTART ((ptr_t)(&__environ))
1036 /* hideous kludge: __environ is the first */
1037 /* word in crt0.o, and delimits the start */
1038 /* of the data segment, no matter which */
1039 /* ld options were passed through. */
1040 /* We could use _etext instead, but that */
1041 /* would include .rodata, which may */
1042 /* contain large read-only data tables */
1043 /* that we'd rather not scan. */
1044# endif
1045 extern int _end[];
1046# define DATAEND (_end)
1047# else
1048 extern int etext[];
1049# define DATASTART ((ptr_t)((((word) (etext)) + 0xfff) & ~0xfff))
1050# endif
1051# ifdef USE_I686_PREFETCH
1052# define PREFETCH(x) \
1053 __asm__ __volatile__ (" prefetchnta %0": : "m"(*(char *)(x)))
1054 /* Empirically prefetcht0 is much more effective at reducing */
1055	/* cache miss stalls for the targeted load instructions.  But it	*/
1056 /* seems to interfere enough with other cache traffic that the net */
1057 /* result is worse than prefetchnta. */
1058# if 0
1059 /* Using prefetches for write seems to have a slight negative */
1060 /* impact on performance, at least for a PIII/500. */
1061# define PREFETCH_FOR_WRITE(x) \
1062 __asm__ __volatile__ (" prefetcht0 %0": : "m"(*(char *)(x)))
1063# endif
1064# endif
1065# ifdef USE_3DNOW_PREFETCH
1066# define PREFETCH(x) \
1067 __asm__ __volatile__ (" prefetch %0": : "m"(*(char *)(x)))
1068# define PREFETCH_FOR_WRITE(x) \
1069 __asm__ __volatile__ (" prefetchw %0": : "m"(*(char *)(x)))
1070# endif
1071# endif
1072# ifdef CYGWIN32
1073# define OS_TYPE "CYGWIN32"
1074 extern int _data_start__[];
1075 extern int _data_end__[];
1076 extern int _bss_start__[];
1077 extern int _bss_end__[];
1078 /* For binutils 2.9.1, we have */
1079 /* DATASTART = _data_start__ */
1080 /* DATAEND = _bss_end__ */
1081 /* whereas for some earlier versions it was */
1082 /* DATASTART = _bss_start__ */
1083 /* DATAEND = _data_end__ */
1084 /* To get it right for both, we take the */
1085	   /* minimum/maximum of the two.		*/
1086# define MAX(x,y) ((x) > (y) ? (x) : (y))
1087# define MIN(x,y) ((x) < (y) ? (x) : (y))
1088# define DATASTART ((ptr_t) MIN(_data_start__, _bss_start__))
1089# define DATAEND ((ptr_t) MAX(_data_end__, _bss_end__))
1090# undef STACK_GRAN
1091# define STACK_GRAN 0x10000
1092# define HEURISTIC1
1093# endif
1094# ifdef OS2
1095# define OS_TYPE "OS2"
1096 /* STACKBOTTOM and DATASTART are handled specially in */
1097 /* os_dep.c. OS2 actually has the right */
1098 /* system call! */
1099# define DATAEND /* not needed */
1100# define USE_GENERIC_PUSH_REGS
1101# endif
1102# ifdef MSWIN32
1103# define OS_TYPE "MSWIN32"
1104 /* STACKBOTTOM and DATASTART are handled specially in */
1105 /* os_dep.c. */
1106# ifndef __WATCOMC__
1107# define MPROTECT_VDB
1108# endif
1109# define DATAEND /* not needed */
1110# endif
1111# ifdef MSWINCE
1112# define OS_TYPE "MSWINCE"
1113# define DATAEND /* not needed */
1114# endif
1115# ifdef DJGPP
1116# define OS_TYPE "DJGPP"
1117# include "stubinfo.h"
1118 extern int etext[];
1119 extern int _stklen;
1120 extern int __djgpp_stack_limit;
1121# define DATASTART ((ptr_t)((((word) (etext)) + 0x1ff) & ~0x1ff))
1122/* # define STACKBOTTOM ((ptr_t)((word) _stubinfo + _stubinfo->size \
1123 + _stklen)) */
1124# define STACKBOTTOM ((ptr_t)((word) __djgpp_stack_limit + _stklen))
1125 /* This may not be right. */
1126# endif
1127# ifdef OPENBSD
1128# define OS_TYPE "OPENBSD"
1129# endif
1130# ifdef FREEBSD
1131# define OS_TYPE "FREEBSD"
1132# ifndef GC_FREEBSD_THREADS
1133# define MPROTECT_VDB
1134# endif
1135# define SIG_SUSPEND SIGUSR1
1136# define SIG_THR_RESTART SIGUSR2
1137# define FREEBSD_STACKBOTTOM
1138# ifdef __ELF__
1139# define DYNAMIC_LOADING
1140# endif
1141 extern char etext[];
1142 extern char * GC_FreeBSDGetDataStart();
1143# define DATASTART GC_FreeBSDGetDataStart(0x1000, &etext)
1144# endif
1145# ifdef NETBSD
1146# define OS_TYPE "NETBSD"
1147# ifdef __ELF__
1148# define DYNAMIC_LOADING
1149# endif
1150# endif
1151# ifdef THREE86BSD
1152# define OS_TYPE "THREE86BSD"
1153# endif
1154# ifdef BSDI
1155# define OS_TYPE "BSDI"
1156# endif
1157# if defined(OPENBSD) || defined(NETBSD) \
1158 || defined(THREE86BSD) || defined(BSDI)
1159# define HEURISTIC2
1160 extern char etext[];
1161# define DATASTART ((ptr_t)(etext))
1162# endif
1163# ifdef NEXT
1164# define OS_TYPE "NEXT"
1165# define DATASTART ((ptr_t) get_etext())
1166# define STACKBOTTOM ((ptr_t)0xc0000000)
1167# define DATAEND /* not needed */
1168# endif
1169# ifdef DOS4GW
1170# define OS_TYPE "DOS4GW"
1171 extern long __nullarea;
1172 extern char _end;
1173 extern char *_STACKTOP;
1174    /* Depending on calling conventions Watcom C either does
1175       or does not precede the names of C-variables with an underscore.
1176       Make sure startup code variables always have the same names.  */
1177 #pragma aux __nullarea "*";
1178 #pragma aux _end "*";
1179# define STACKBOTTOM ((ptr_t) _STACKTOP)
1180 /* confused? me too. */
1181# define DATASTART ((ptr_t) &__nullarea)
1182# define DATAEND ((ptr_t) &_end)
1183# endif
1184# ifdef HURD
1185# define OS_TYPE "HURD"
1186# define STACK_GROWS_DOWN
1187# define HEURISTIC2
1188 extern int __data_start[];
1189# define DATASTART ( (ptr_t) (__data_start))
1190 extern int _end[];
1191# define DATAEND ( (ptr_t) (_end))
1192/* # define MPROTECT_VDB Not quite working yet? */
1193# define DYNAMIC_LOADING
1194# endif
1195# endif
1196
1197# ifdef NS32K
1198# define MACH_TYPE "NS32K"
1199# define ALIGNMENT 4
1200 extern char **environ;
1201# define DATASTART ((ptr_t)(&environ))
1202 /* hideous kludge: environ is the first */
1203 /* word in crt0.o, and delimits the start */
1204 /* of the data segment, no matter which */
1205 /* ld options were passed through. */
1206# define STACKBOTTOM ((ptr_t) 0xfffff000) /* for Encore */
1207# endif
1208
1209# ifdef MIPS
1210# define MACH_TYPE "MIPS"
1211# ifdef LINUX
1212 /* This was developed for a linuxce style platform. Probably */
1213 /* needs to be tweaked for workstation class machines. */
1214# define OS_TYPE "LINUX"
1215# define DYNAMIC_LOADING
1216 extern int _end[];
1217# define DATAEND (_end)
1218 extern int __data_start[];
1219# define DATASTART ((ptr_t)(__data_start))
1220# define ALIGNMENT 4
1221# define USE_GENERIC_PUSH_REGS
1222# if __GLIBC__ == 2 && __GLIBC_MINOR__ >= 2 || __GLIBC__ > 2
1223# define LINUX_STACKBOTTOM
1224# else
1225# define STACKBOTTOM 0x80000000
1226# endif
1227# endif /* Linux */
1228# ifdef EWS4800
1229# define HEURISTIC2
1230# if defined(_MIPS_SZPTR) && (_MIPS_SZPTR == 64)
1231 extern int _fdata[], _end[];
1232# define DATASTART ((ptr_t)_fdata)
1233# define DATAEND ((ptr_t)_end)
1234# define CPP_WORDSZ _MIPS_SZPTR
1235# define ALIGNMENT (_MIPS_SZPTR/8)
1236# else
1237 extern int etext[], edata[], end[];
1238 extern int _DYNAMIC_LINKING[], _gp[];
1239# define DATASTART ((ptr_t)((((word)etext + 0x3ffff) & ~0x3ffff) \
1240 + ((word)etext & 0xffff)))
1241# define DATAEND (edata)
1242# define DATASTART2 (_DYNAMIC_LINKING \
1243 ? (ptr_t)(((word)_gp + 0x8000 + 0x3ffff) & ~0x3ffff) \
1244 : (ptr_t)edata)
1245# define DATAEND2 (end)
1246# define ALIGNMENT 4
1247# endif
1248# define OS_TYPE "EWS4800"
1249# define USE_GENERIC_PUSH_REGS 1
1250# endif
1251# ifdef ULTRIX
1252# define HEURISTIC2
1253# define DATASTART (ptr_t)0x10000000
1254 /* Could probably be slightly higher since */
1255 /* startup code allocates lots of stuff. */
1256# define OS_TYPE "ULTRIX"
1257# define ALIGNMENT 4
1258# endif
1259# ifdef RISCOS
1260# define HEURISTIC2
1261# define DATASTART (ptr_t)0x10000000
1262# define OS_TYPE "RISCOS"
1263# define ALIGNMENT 4 /* Required by hardware */
1264# endif
1265# ifdef IRIX5
1266# define HEURISTIC2
1267 extern int _fdata[];
1268# define DATASTART ((ptr_t)(_fdata))
1269# ifdef USE_MMAP
1270# define HEAP_START (ptr_t)0x30000000
1271# else
1272# define HEAP_START DATASTART
1273# endif
1274 /* Lowest plausible heap address. */
1275 /* In the MMAP case, we map there. */
1276 /* In either case it is used to identify */
1277 /* heap sections so they're not */
1278 /* considered as roots. */
1279# define OS_TYPE "IRIX5"
1280# define MPROTECT_VDB
1281# ifdef _MIPS_SZPTR
1282# define CPP_WORDSZ _MIPS_SZPTR
1283# define ALIGNMENT (_MIPS_SZPTR/8)
1284# if CPP_WORDSZ != 64
1285# define ALIGN_DOUBLE
1286# endif
1287# else
1288# define ALIGNMENT 4
1289# define ALIGN_DOUBLE
1290# endif
1291# define DYNAMIC_LOADING
1292# endif
1293# ifdef MSWINCE
1294# define OS_TYPE "MSWINCE"
1295# define ALIGNMENT 4
1296# define DATAEND /* not needed */
1297# endif
1298# if defined(NETBSD)
1299 /* This also checked for __MIPSEL__ . Why? NETBSD recognition */
1300 /* should be handled at the top of the file. */
1301# define ALIGNMENT 4
1302# define OS_TYPE "NETBSD"
1303# define HEURISTIC2
1304# define USE_GENERIC_PUSH_REGS
1305# ifdef __ELF__
1306 extern int etext[];
1307# define DATASTART GC_data_start
1308# define NEED_FIND_LIMIT
1309# define DYNAMIC_LOADING
1310# else
1311# define DATASTART ((ptr_t) 0x10000000)
1312# define STACKBOTTOM ((ptr_t) 0x7ffff000)
1313# endif /* _ELF_ */
1314# endif
1315# endif
1316
1317# ifdef RS6000
1318# define MACH_TYPE "RS6000"
1319# ifdef __64BIT__
1320# define ALIGNMENT 8
1321# define CPP_WORDSZ 64
1322# define STACKBOTTOM 0x1000000000000000
1323# else
1324# define ALIGNMENT 4
1325# define CPP_WORDSZ 32
1326# define STACKBOTTOM ((ptr_t)((ulong)&errno))
1327# endif
1328 extern int _data[], _end[];
1329# define DATASTART ((ptr_t)((ulong)_data))
1330# define DATAEND ((ptr_t)((ulong)_end))
1331 extern int errno;
1332# define USE_GENERIC_PUSH_REGS
1333# define DYNAMIC_LOADING
1334 /* For really old versions of AIX, this may have to be removed. */
1335# endif
1336
1337# ifdef HP_PA
1338# define MACH_TYPE "HP_PA"
1339# ifdef __LP64__
1340# define CPP_WORDSZ 64
1341# define ALIGNMENT 8
1342# else
1343# define CPP_WORDSZ 32
1344# define ALIGNMENT 4
1345# define ALIGN_DOUBLE
1346# endif
1347# if !defined(GC_HPUX_THREADS) && !defined(GC_LINUX_THREADS)
1348# ifndef LINUX /* For now. */
1349# define MPROTECT_VDB
1350# endif
1351# else
1352# define GENERIC_COMPARE_AND_SWAP
1353 /* No compare-and-swap instruction. Use pthread mutexes */
1354 /* when we absolutely have to. */
1355# ifdef PARALLEL_MARK
1356# define USE_MARK_BYTES
1357 /* Minimize compare-and-swap usage. */
1358# endif
1359# endif
1360# define STACK_GROWS_UP
1361# ifdef HPUX
1362# define OS_TYPE "HPUX"
1363 extern int __data_start[];
1364# define DATASTART ((ptr_t)(__data_start))
1365# if 0
1366 /* The following appears to work for 7xx systems running HP/UX */
1367 /* 9.xx Furthermore, it might result in much faster */
1368 /* collections than HEURISTIC2, which may involve scanning */
1369 /* segments that directly precede the stack. It is not the */
1370 /* default, since it may not work on older machine/OS */
1371 /* combinations. (Thanks to Raymond X.T. Nijssen for uncovering */
1372 /* this.) */
1373# define STACKBOTTOM ((ptr_t) 0x7b033000) /* from /etc/conf/h/param.h */
1374# else
1375 /* Gustavo Rodriguez-Rivera suggested changing HEURISTIC2 */
1376 /* to this. Note that the GC must be initialized before the */
1377 /* first putenv call. */
1378 extern char ** environ;
1379# define STACKBOTTOM ((ptr_t)environ)
1380# endif
1381# define DYNAMIC_LOADING
1382# include <unistd.h>
1383# define GETPAGESIZE() sysconf(_SC_PAGE_SIZE)
1384# ifndef __GNUC__
1385# define PREFETCH(x) { \
1386 register long addr = (long)(x); \
1387 (void) _asm ("LDW", 0, 0, addr, 0); \
1388 }
1389# endif
1390# endif /* HPUX */
1391# ifdef LINUX
1392# define OS_TYPE "LINUX"
1393# define LINUX_STACKBOTTOM
1394# define DYNAMIC_LOADING
1395# define SEARCH_FOR_DATA_START
1396 extern int _end[];
1397# define DATAEND (&_end)
1398# endif /* LINUX */
1399# endif /* HP_PA */
1400
1401# ifdef ALPHA
1402# define MACH_TYPE "ALPHA"
1403# define ALIGNMENT 8
1404# define CPP_WORDSZ 64
1405# ifndef LINUX
1406# define USE_GENERIC_PUSH_REGS
1407 /* Gcc and probably the DEC/Compaq compiler spill pointers to preserved */
1408 /* fp registers in some cases when the target is a 21264. The assembly */
1409 /* code doesn't handle that yet, and version dependencies make that a */
1410 /* bit tricky. Do the easy thing for now. */
1411# endif
1412# ifdef NETBSD
1413# define OS_TYPE "NETBSD"
1414# define HEURISTIC2
1415# define DATASTART GC_data_start
1416# define ELFCLASS32 32
1417# define ELFCLASS64 64
1418# define ELF_CLASS ELFCLASS64
1419# define DYNAMIC_LOADING
1420# endif
1421# ifdef OPENBSD
1422# define OS_TYPE "OPENBSD"
1423# define HEURISTIC2
1424# ifdef __ELF__ /* since OpenBSD/Alpha 2.9 */
1425# define DATASTART GC_data_start
1426# define ELFCLASS32 32
1427# define ELFCLASS64 64
1428# define ELF_CLASS ELFCLASS64
1429# else /* ECOFF, until OpenBSD/Alpha 2.7 */
1430# define DATASTART ((ptr_t) 0x140000000)
1431# endif
1432# endif
1433# ifdef FREEBSD
1434# define OS_TYPE "FREEBSD"
1435/* MPROTECT_VDB is not yet supported at all on FreeBSD/alpha. */
1436# define SIG_SUSPEND SIGUSR1
1437# define SIG_THR_RESTART SIGUSR2
1438# define FREEBSD_STACKBOTTOM
1439# ifdef __ELF__
1440# define DYNAMIC_LOADING
1441# endif
1442/* Handle unmapped hole alpha*-*-freebsd[45]* puts between etext and edata. */
1443 extern char etext[];
1444 extern char edata[];
1445 extern char end[];
1446# define NEED_FIND_LIMIT
1447# define DATASTART ((ptr_t)(&etext))
1448# define DATAEND (GC_find_limit (DATASTART, TRUE))
1449# define DATASTART2 ((ptr_t)(&edata))
1450# define DATAEND2 ((ptr_t)(&end))
1451# endif
1452# ifdef OSF1
1453# define OS_TYPE "OSF1"
1454# define DATASTART ((ptr_t) 0x140000000)
1455 extern int _end[];
1456# define DATAEND ((ptr_t) &_end)
1457 extern char ** environ;
1458 /* round up from the value of environ to the nearest page boundary */
1459 /* Probably breaks if putenv is called before collector */
1460 /* initialization. */
1461# define STACKBOTTOM ((ptr_t)(((word)(environ) | (getpagesize()-1))+1))
1462/* # define HEURISTIC2 */
1463	/* Normally HEURISTIC2 is too conservative, since		*/
1464	/* the text segment immediately follows the stack.		*/
1465	/* Hence we give an upper bound.				*/
1466 /* This is currently unused, since we disabled HEURISTIC2 */
1467 extern int __start[];
1468# define HEURISTIC2_LIMIT ((ptr_t)((word)(__start) & ~(getpagesize()-1)))
1469# ifndef GC_OSF1_THREADS
1470 /* Unresolved signal issues with threads. */
1471# define MPROTECT_VDB
1472# endif
1473# define DYNAMIC_LOADING
1474# endif
1475# ifdef LINUX
1476# define OS_TYPE "LINUX"
1477# define STACKBOTTOM ((ptr_t) 0x120000000)
1478# ifdef __ELF__
1479# define SEARCH_FOR_DATA_START
1480# define DYNAMIC_LOADING
1481# else
1482# define DATASTART ((ptr_t) 0x140000000)
1483# endif
1484 extern int _end[];
1485# define DATAEND (_end)
1486# define MPROTECT_VDB
1487 /* Has only been superficially tested. May not */
1488 /* work on all versions. */
1489# endif
1490# endif
1491
1492# ifdef IA64
1493# define MACH_TYPE "IA64"
1494# define USE_GENERIC_PUSH_REGS
1495 /* We need to get preserved registers in addition to register */
1496 /* windows. That's easiest to do with setjmp. */
1497# ifdef PARALLEL_MARK
1498# define USE_MARK_BYTES
1499 /* Compare-and-exchange is too expensive to use for */
1500 /* setting mark bits. */
1501# endif
1502# ifdef HPUX
1503# ifdef _ILP32
1504# define CPP_WORDSZ 32
1505# define ALIGN_DOUBLE
1506 /* Requires 8 byte alignment for malloc */
1507# define ALIGNMENT 4
1508# else
1509# ifndef _LP64
1510 ---> unknown ABI
1511# endif
1512# define CPP_WORDSZ 64
1513# define ALIGN_DOUBLE
1514 /* Requires 16 byte alignment for malloc */
1515# define ALIGNMENT 8
1516# endif
1517# define OS_TYPE "HPUX"
1518 extern int __data_start[];
1519# define DATASTART ((ptr_t)(__data_start))
1520 /* Gustavo Rodriguez-Rivera suggested changing HEURISTIC2 */
1521 /* to this. Note that the GC must be initialized before the */
1522 /* first putenv call. */
1523 extern char ** environ;
1524# define STACKBOTTOM ((ptr_t)environ)
1525# define DYNAMIC_LOADING
1526# include <unistd.h>
1527# define GETPAGESIZE() sysconf(_SC_PAGE_SIZE)
1528 /* The following was empirically determined, and is probably */
1529 /* not very robust. */
1530 /* Note that the backing store base seems to be at a nice */
1531 /* address minus one page. */
1532# define BACKING_STORE_DISPLACEMENT 0x1000000
1533# define BACKING_STORE_ALIGNMENT 0x1000
1534# define BACKING_STORE_BASE \
1535 (ptr_t)(((word)GC_stackbottom - BACKING_STORE_DISPLACEMENT - 1) \
1536 & ~(BACKING_STORE_ALIGNMENT - 1))
1537# endif
1538# ifdef LINUX
1539# define CPP_WORDSZ 64
1540# define ALIGN_DOUBLE
1541 /* Requires 16 byte alignment for malloc */
1542# define ALIGNMENT 8
1543# define OS_TYPE "LINUX"
1544 /* The following works on NUE and older kernels: */
1545/* # define STACKBOTTOM ((ptr_t) 0xa000000000000000l) */
1546 /* This does not work on NUE: */
1547# define LINUX_STACKBOTTOM
1548 /* We also need the base address of the register stack */
1549 /* backing store. This is computed in */
1550 /* GC_linux_register_stack_base based on the following */
1551 /* constants: */
1552# define BACKING_STORE_ALIGNMENT 0x100000
1553# define BACKING_STORE_DISPLACEMENT 0x80000000
1554 extern char * GC_register_stackbottom;
1555# define BACKING_STORE_BASE ((ptr_t)GC_register_stackbottom)
1556# define SEARCH_FOR_DATA_START
1557# ifdef __GNUC__
1558# define DYNAMIC_LOADING
1559# else
1560 /* In the Intel compiler environment, we seem to end up with */
1561 /* statically linked executables and an undefined reference */
1562 /* to _DYNAMIC */
1563# endif
1564# define MPROTECT_VDB
1565 /* Requires Linux 2.3.47 or later. */
1566 extern int _end[];
1567# define DATAEND (_end)
1568# ifdef __GNUC__
1569# define PREFETCH(x) \
1570 __asm__ (" lfetch [%0]": : "r"((void *)(x)))
1571# define PREFETCH_FOR_WRITE(x) \
1572 __asm__ (" lfetch.excl [%0]": : "r"((void *)(x)))
1573# define CLEAR_DOUBLE(x) \
1574 __asm__ (" stf.spill [%0]=f0": : "r"((void *)(x)))
1575# endif
1576# endif
1577# endif
1578
1579# ifdef M88K
1580# define MACH_TYPE "M88K"
1581# define ALIGNMENT 4
1582# define ALIGN_DOUBLE
1583 extern int etext[];
1584# ifdef CX_UX
1585# define OS_TYPE "CX_UX"
1586# define DATASTART ((((word)etext + 0x3fffff) & ~0x3fffff) + 0x10000)
1587# endif
1588# ifdef DGUX
1589# define OS_TYPE "DGUX"
1590 extern ptr_t GC_SysVGetDataStart();
1591# define DATASTART GC_SysVGetDataStart(0x10000, etext)
1592# endif
1593# define STACKBOTTOM ((char*)0xf0000000) /* determined empirically */
1594# endif
1595
1596# ifdef S370
1597 /* If this still works, and if anyone cares, this should probably */
1598 /* be moved to the S390 category. */
1599# define MACH_TYPE "S370"
1600# define ALIGNMENT 4 /* Required by hardware */
1601# define USE_GENERIC_PUSH_REGS
1602# ifdef UTS4
1603# define OS_TYPE "UTS4"
1604 extern int etext[];
1605 extern int _etext[];
1606 extern int _end[];
1607 extern ptr_t GC_SysVGetDataStart();
1608# define DATASTART GC_SysVGetDataStart(0x10000, _etext)
1609# define DATAEND (_end)
1610# define HEURISTIC2
1611# endif
1612# endif
1613
1614# ifdef S390
1615# define MACH_TYPE "S390"
1616# define USE_GENERIC_PUSH_REGS
1617# ifndef __s390x__
1618# define ALIGNMENT 4
1619# define CPP_WORDSZ 32
1620# else
1621# define ALIGNMENT 8
1622# define CPP_WORDSZ 64
1623# define HBLKSIZE 4096
1624# endif
1625# ifdef LINUX
1626# define OS_TYPE "LINUX"
1627# define LINUX_STACKBOTTOM
1628# define DYNAMIC_LOADING
1629 extern int __data_start[];
1630# define DATASTART ((ptr_t)(__data_start))
1631 extern int _end[];
1632# define DATAEND (_end)
1633# define CACHE_LINE_SIZE 256
1634# define GETPAGESIZE() 4096
1635# endif
1636# endif
1637
1638# if defined(PJ)
1639# define ALIGNMENT 4
1640 extern int _etext[];
1641# define DATASTART ((ptr_t)(_etext))
1642# define HEURISTIC1
1643# endif
1644
1645# ifdef ARM32
1646# define CPP_WORDSZ 32
1647# define MACH_TYPE "ARM32"
1648# define ALIGNMENT 4
1649# ifdef NETBSD
1650# define OS_TYPE "NETBSD"
1651# define HEURISTIC2
1652 extern char etext[];
1653# define DATASTART ((ptr_t)(etext))
1654# define USE_GENERIC_PUSH_REGS
1655# endif
1656# ifdef LINUX
1657# define OS_TYPE "LINUX"
1658# define HEURISTIC1
1659# undef STACK_GRAN
1660# define STACK_GRAN 0x10000000
1661# define USE_GENERIC_PUSH_REGS
1662# ifdef __ELF__
1663# define DYNAMIC_LOADING
1664# include <features.h>
1665# if defined(__GLIBC__) && __GLIBC__ >= 2
1666# define SEARCH_FOR_DATA_START
1667# else
1668 extern char **__environ;
1669# define DATASTART ((ptr_t)(&__environ))
1670 /* hideous kludge: __environ is the first */
1671 /* word in crt0.o, and delimits the start */
1672 /* of the data segment, no matter which */
1673 /* ld options were passed through. */
1674 /* We could use _etext instead, but that */
1675 /* would include .rodata, which may */
1676 /* contain large read-only data tables */
1677 /* that we'd rather not scan. */
1678# endif
1679 extern int _end[];
1680# define DATAEND (_end)
1681# else
1682 extern int etext[];
1683# define DATASTART ((ptr_t)((((word) (etext)) + 0xfff) & ~0xfff))
1684# endif
1685# endif
1686# ifdef MSWINCE
1687# define OS_TYPE "MSWINCE"
1688# define DATAEND /* not needed */
1689# endif
1690# ifdef NOSYS
1691 /* __data_start is usually defined in the target linker script. */
1692 extern int __data_start[];
1693# define DATASTART (ptr_t)(__data_start)
1694# define USE_GENERIC_PUSH_REGS
1695 /* __stack_base__ is set in newlib/libc/sys/arm/crt0.S */
1696 extern void *__stack_base__;
1697# define STACKBOTTOM ((ptr_t) (__stack_base__))
1698# endif
1699#endif
1700
1701# ifdef SH
1702# define MACH_TYPE "SH"
1703# define ALIGNMENT 4
1704# ifdef MSWINCE
1705# define OS_TYPE "MSWINCE"
1706# define DATAEND /* not needed */
1707# endif
1708# ifdef LINUX
1709# define OS_TYPE "LINUX"
1710# define STACKBOTTOM ((ptr_t) 0x7c000000)
1711# define USE_GENERIC_PUSH_REGS
1712# define DYNAMIC_LOADING
1713# define SEARCH_FOR_DATA_START
1714 extern int _end[];
1715# define DATAEND (_end)
1716# endif
1717# endif
1718
1719# ifdef SH4
1720# define MACH_TYPE "SH4"
1721# define OS_TYPE "MSWINCE"
1722# define ALIGNMENT 4
1723# define DATAEND /* not needed */
1724# endif
1725
1726# ifdef X86_64
1727# define MACH_TYPE "X86_64"
1728# define ALIGNMENT 8
1729# define CPP_WORDSZ 64
1730# ifndef HBLKSIZE
1731# define HBLKSIZE 4096
1732# endif
1733# define CACHE_LINE_SIZE 64
1734# define USE_GENERIC_PUSH_REGS
1735# ifdef LINUX
1736# define OS_TYPE "LINUX"
1737# define LINUX_STACKBOTTOM
1738# if !defined(GC_LINUX_THREADS) || !defined(REDIRECT_MALLOC)
1739# define MPROTECT_VDB
1740# else
1741 /* We seem to get random errors in incremental mode, */
1742 /* possibly because Linux threads is itself a malloc client */
1743 /* and can't deal with the signals. */
1744# endif
1745# ifdef __ELF__
1746# define DYNAMIC_LOADING
1747# ifdef UNDEFINED /* includes ro data */
1748 extern int _etext[];
1749# define DATASTART ((ptr_t)((((word) (_etext)) + 0xfff) & ~0xfff))
1750# endif
1751# include <features.h>
1752# define SEARCH_FOR_DATA_START
1753 extern int _end[];
1754# define DATAEND (_end)
1755# else
1756 extern int etext[];
1757# define DATASTART ((ptr_t)((((word) (etext)) + 0xfff) & ~0xfff))
1758# endif
1759# define PREFETCH(x) \
1760 __asm__ __volatile__ (" prefetch %0": : "m"(*(char *)(x)))
1761# define PREFETCH_FOR_WRITE(x) \
1762 __asm__ __volatile__ (" prefetchw %0": : "m"(*(char *)(x)))
1763# endif
1764# endif
1765
1766#if defined(LINUX) && defined(REDIRECT_MALLOC)
1767 /* Rld appears to allocate some memory with its own allocator, and */
1768 /* some through malloc, which might be redirected. To make this */
1769 /* work with collectable memory, we have to scan memory allocated */
1770 /* by rld's internal malloc. */
1771# define USE_PROC_FOR_LIBRARIES
1772#endif
1773
1774# ifndef STACK_GROWS_UP
1775# define STACK_GROWS_DOWN
1776# endif
1777
1778# ifndef CPP_WORDSZ
1779# define CPP_WORDSZ 32
1780# endif
1781
1782# ifndef OS_TYPE
1783# define OS_TYPE ""
1784# endif
1785
1786# ifndef DATAEND
1787 extern int end[];
1788# define DATAEND (end)
1789# endif
1790
1791# if defined(SVR4) && !defined(GETPAGESIZE)
1792# include <unistd.h>
1793# define GETPAGESIZE() sysconf(_SC_PAGESIZE)
1794# endif
1795
1796# ifndef GETPAGESIZE
1797# if defined(SUNOS5) || defined(IRIX5)
1798# include <unistd.h>
1799# endif
1800# define GETPAGESIZE() getpagesize()
1801# endif
1802
1803# if defined(SUNOS5) || defined(DRSNX) || defined(UTS4)
1804 /* OS has SVR4 generic features. Probably others also qualify. */
1805# define SVR4
1806# endif
1807
1808# if defined(SUNOS5) || defined(DRSNX)
1809 /* OS has SUNOS5 style semi-undocumented interface to dynamic */
1810 /* loader. */
1811# define SUNOS5DL
1812 /* OS has SUNOS5 style signal handlers. */
1813# define SUNOS5SIGS
1814# endif
1815
1816# if defined(HPUX)
1817# define SUNOS5SIGS
1818# endif
1819
1820# if defined(SVR4) || defined(LINUX) || defined(IRIX) || defined(HPUX) \
1821 || defined(OPENBSD) || defined(NETBSD) || defined(FREEBSD) || defined(DGUX) \
1822 || defined(BSD) || defined(AIX) || defined(MACOSX) || defined(OSF1)
1823# define UNIX_LIKE /* Basic Unix-like system calls work. */
1824# endif
1825
1826# if CPP_WORDSZ != 32 && CPP_WORDSZ != 64
1827 -> bad word size
1828# endif
1829
1830# ifdef PCR
1831# undef DYNAMIC_LOADING
1832# undef STACKBOTTOM
1833# undef HEURISTIC1
1834# undef HEURISTIC2
1835# undef PROC_VDB
1836# undef MPROTECT_VDB
1837# define PCR_VDB
1838# endif
1839
1840# ifdef SRC_M3
1841 /* Postponed for now. */
1842# undef PROC_VDB
1843# undef MPROTECT_VDB
1844# endif
1845
1846# ifdef SMALL_CONFIG
1847 /* Presumably not worth the space it takes. */
1848# undef PROC_VDB
1849# undef MPROTECT_VDB
1850# endif
1851
1852# ifdef USE_MUNMAP
1853# undef MPROTECT_VDB /* Can't deal with address space holes. */
1854# endif
1855
1856# ifdef PARALLEL_MARK
1857# undef MPROTECT_VDB /* For now. */
1858# endif
1859
1860# if !defined(PCR_VDB) && !defined(PROC_VDB) && !defined(MPROTECT_VDB)
1861# define DEFAULT_VDB
1862# endif
1863
1864# ifndef PREFETCH
1865# define PREFETCH(x)
1866# define NO_PREFETCH
1867# endif
1868
1869# ifndef PREFETCH_FOR_WRITE
1870# define PREFETCH_FOR_WRITE(x)
1871# define NO_PREFETCH_FOR_WRITE
1872# endif
1873
1874# ifndef CACHE_LINE_SIZE
1875# define CACHE_LINE_SIZE 32 /* Wild guess */
1876# endif
1877
1878# ifdef LINUX
1879# define REGISTER_LIBRARIES_EARLY
1880 /* We sometimes use dl_iterate_phdr, which may acquire an internal */
1881 /* lock. This isn't safe after the world has stopped. So we must */
1882 /* call GC_register_dynamic_libraries before stopping the world. */
1883 /* For performance reasons, this may be beneficial on other */
1884 /* platforms as well, though it should be avoided in win32. */
1885# endif /* LINUX */
1886
1887# if defined(SEARCH_FOR_DATA_START) && defined(GC_PRIVATE_H)
1888 extern ptr_t GC_data_start;
1889# define DATASTART GC_data_start
1890# endif
1891
1892# ifndef CLEAR_DOUBLE
1893# define CLEAR_DOUBLE(x) \
1894 ((word*)x)[0] = 0; \
1895 ((word*)x)[1] = 0;
1896# endif /* CLEAR_DOUBLE */
1897
1898 /* Internally we use GC_SOLARIS_THREADS to test for either old or pthreads. */
1899# if defined(GC_SOLARIS_PTHREADS) && !defined(GC_SOLARIS_THREADS)
1900# define GC_SOLARIS_THREADS
1901# endif
1902
1903# if defined(GC_IRIX_THREADS) && !defined(IRIX5)
1904 --> inconsistent configuration
1905# endif
1906# if defined(GC_LINUX_THREADS) && !defined(LINUX)
1907 --> inconsistent configuration
1908# endif
1909# if defined(GC_SOLARIS_THREADS) && !defined(SUNOS5)
1910 --> inconsistent configuration
1911# endif
1912# if defined(GC_HPUX_THREADS) && !defined(HPUX)
1913 --> inconsistent configuration
1914# endif
1915# if defined(GC_WIN32_THREADS) && !defined(MSWIN32) && !defined(CYGWIN32)
1916 --> inconsistent configuration
1917# endif
1918
1919# if defined(PCR) || defined(SRC_M3) || \
1920 defined(GC_SOLARIS_THREADS) || defined(GC_WIN32_THREADS) || \
1921 defined(GC_PTHREADS)
1922# define THREADS
1923# endif
1924
1925# if defined(HP_PA) || defined(M88K) || defined(POWERPC) && !defined(MACOSX) \
1926 || defined(LINT) || defined(MSWINCE) || defined(ARM32) \
1927 || (defined(I386) && defined(__LCC__))
1928 /* Use setjmp based hack to mark from callee-save registers. */
1929 /* The define should move to the individual platform */
1930 /* descriptions. */
1931# define USE_GENERIC_PUSH_REGS
1932# endif
1933
1934# if defined(SPARC)
1935# define ASM_CLEAR_CODE /* Stack clearing is crucial, and we */
1936 /* include assembly code to do it well. */
1937# endif
1938
1939 /* Can we save call chain in objects for debugging? */
1940 /* SET NFRAMES (# of saved frames) and NARGS (#of args for each */
1941 /* frame) to reasonable values for the platform. */
1942 /* Set SAVE_CALL_CHAIN if we can. SAVE_CALL_COUNT can be specified */
1943 /* at build time, though we feel free to adjust it slightly. */
1944 /* Define NEED_CALLINFO if we either save the call stack or */
1945 /* GC_ADD_CALLER is defined. */
1946 /* GC_CAN_SAVE_CALL_STACKS is set in gc.h. */
1947
1948#if defined(SPARC)
1949# define CAN_SAVE_CALL_ARGS
1950#endif
1951#if (defined(I386) || defined(X86_64)) && defined(LINUX)
1952 /* SAVE_CALL_CHAIN is supported if the code is compiled to save */
1953 /* frame pointers by default, i.e. no -fomit-frame-pointer flag. */
1954# define CAN_SAVE_CALL_ARGS
1955#endif
1956
1957# if defined(SAVE_CALL_COUNT) && !defined(GC_ADD_CALLER) \
1958 && defined(GC_CAN_SAVE_CALL_STACKS)
1959# define SAVE_CALL_CHAIN
1960# endif
1961# ifdef SAVE_CALL_CHAIN
1962# if defined(SAVE_CALL_NARGS) && defined(CAN_SAVE_CALL_ARGS)
1963# define NARGS SAVE_CALL_NARGS
1964# else
1965# define NARGS 0 /* Number of arguments to save for each call. */
1966# endif
1967# endif
1968# ifdef SAVE_CALL_CHAIN
1969# ifndef SAVE_CALL_COUNT
1970# define NFRAMES 6 /* Number of frames to save. Even for */
1971 /* alignment reasons. */
1972# else
1973# define NFRAMES ((SAVE_CALL_COUNT + 1) & ~1)
1974# endif
1975# define NEED_CALLINFO
1976# endif /* SAVE_CALL_CHAIN */
1977# ifdef GC_ADD_CALLER
1978# define NFRAMES 1
1979# define NARGS 0
1980# define NEED_CALLINFO
1981# endif
1982
1983# if defined(MAKE_BACK_GRAPH) && !defined(DBG_HDRS_ALL)
1984# define DBG_HDRS_ALL
1985# endif
1986
1987# if defined(POINTER_MASK) && !defined(POINTER_SHIFT)
1988# define POINTER_SHIFT 0
1989# endif
1990
1991# if defined(POINTER_SHIFT) && !defined(POINTER_MASK)
1992# define POINTER_MASK ((GC_word)(-1))
1993# endif
1994
1995# if !defined(FIXUP_POINTER) && defined(POINTER_MASK)
1996# define FIXUP_POINTER(p) (p) = ((p) & (POINTER_MASK) << POINTER_SHIFT)
1997# endif
1998
1999# if defined(FIXUP_POINTER)
2000# define NEED_FIXUP_POINTER 1
2001# else
2002# define NEED_FIXUP_POINTER 0
2003# define FIXUP_POINTER(p)
2004# endif
2005
2006#ifdef GC_PRIVATE_H
2007	/* This relies on some type definitions from gc_priv.h, from	*/
2008	/* where it's normally included.				*/
2009	/*								*/
2010	/* How to get heap memory from the OS:				*/
2011	/* Note that sbrk()-like allocation is preferred, since it	*/
2012	/* usually makes it possible to merge consecutively allocated	*/
2013	/* chunks.  It also avoids unintended recursion with		*/
2014	/* -DREDIRECT_MALLOC.						*/
2015	/* GET_MEM() returns a HBLKSIZE aligned chunk.			*/
2016	/* 0 is taken to mean failure. 					*/
2017	/* In the case of USE_MMAP, the argument must also be a		*/
2018	/* physical page size.						*/
2019	/* GET_MEM is currently not assumed to retrieve 0 filled space, */
2020	/* though we should perhaps take advantage of the case in which */
2021	/* it does.							*/
2022	struct hblk;	/* See gc_priv.h.	*/
2023#   ifdef PCR
2024	    char * real_malloc();
2025#       define GET_MEM(bytes) HBLKPTR(real_malloc((size_t)bytes + GC_page_size) \
2026					  + GC_page_size-1)
2027#   else
2028#     ifdef OS2
2029	      void * os2_alloc(size_t bytes);
2030#         define GET_MEM(bytes) HBLKPTR((ptr_t)os2_alloc((size_t)bytes \
2031					    + GC_page_size) \
2032					    + GC_page_size-1)
2033#     else
2034#	if defined(NEXT) || defined(DOS4GW) || \
2035		 (defined(AMIGA) && !defined(GC_AMIGA_FASTALLOC)) || \
2036		 (defined(SUNOS5) && !defined(USE_MMAP))
2037#	    define GET_MEM(bytes) HBLKPTR((size_t) \
2038					   calloc(1, (size_t)bytes + GC_page_size) \
2039					   + GC_page_size-1)
2040#	else
2041#	  ifdef MSWIN32
2042#	    ifdef GC_PRIVATE_H
2043	      extern ptr_t GC_win32_get_mem();
2044#	    endif
2045#           define GET_MEM(bytes) (struct hblk *)GC_win32_get_mem(bytes)
2046#	  else
2047#	    ifdef MACOS
2048#	      if defined(USE_TEMPORARY_MEMORY)
2049		    extern Ptr GC_MacTemporaryNewPtr(size_t size,
2050						     Boolean clearMemory);
2051#		    define GET_MEM(bytes) HBLKPTR( \
2052			GC_MacTemporaryNewPtr(bytes + GC_page_size, true) \
2053			+ GC_page_size-1)
2054#	      else
2055#		    define GET_MEM(bytes) HBLKPTR( \
2056			NewPtrClear(bytes + GC_page_size) + GC_page_size-1)
2057#	      endif
2058#	    else
2059#	      ifdef MSWINCE
2060#	        ifdef GC_PRIVATE_H
2061	          extern ptr_t GC_wince_get_mem();
2062#	        endif
2063#	        define GET_MEM(bytes) (struct hblk *)GC_wince_get_mem(bytes)
2064#	      else
2065#		if defined(AMIGA) && defined(GC_AMIGA_FASTALLOC)
2066			extern void *GC_amiga_get_mem(size_t size);
2067#			define GET_MEM(bytes) HBLKPTR((size_t) \
2068			  GC_amiga_get_mem((size_t)bytes + GC_page_size) \
2069			  + GC_page_size-1)
2070#		else
2071#	          ifdef GC_PRIVATE_H
2072		    extern ptr_t GC_unix_get_mem();
2073#	          endif
2074#                 define GET_MEM(bytes) (struct hblk *)GC_unix_get_mem(bytes)
2075#		endif
2076#	      endif
2077#	    endif
2078#	  endif
2079#	endif
2080#     endif
2081#   endif
2082
2083#endif /* GC_PRIVATE_H */
2084
2085# endif /* GCCONFIG_H */
diff --git a/gc/include/private/solaris_threads.h b/gc/include/private/solaris_threads.h
deleted file mode 100644
index 7d49c2987e0..00000000000
--- a/gc/include/private/solaris_threads.h
+++ /dev/null
@@ -1,35 +0,0 @@
1#ifdef GC_SOLARIS_THREADS
2
3/* The set of all known threads. We intercept thread creation and */
4/* joins. We never actually create detached threads. We allocate all */
5/* new thread stacks ourselves. These allow us to maintain this */
6/* data structure. */
7/* Protected by GC_thr_lock. */
8/* Some of this should be declared volatile, but that's incosnsistent */
9/* with some library routine declarations. In particular, the */
10/* definition of cond_t doesn't mention volatile! */
11 typedef struct GC_Thread_Rep {
12 struct GC_Thread_Rep * next;
13 thread_t id;
14 word flags;
15# define FINISHED 1 /* Thread has exited. */
16# define DETACHED 2 /* Thread is intended to be detached. */
17# define CLIENT_OWNS_STACK 4
18 /* Stack was supplied by client. */
19# define SUSPNDED 8 /* Currently suspended. */
20 /* SUSPENDED is used insystem header. */
21 ptr_t stack;
22 size_t stack_size;
23 cond_t join_cv;
24 void * status;
25 } * GC_thread;
26 extern GC_thread GC_new_thread(thread_t id);
27
28 extern GC_bool GC_thr_initialized;
29 extern volatile GC_thread GC_threads[];
30 extern size_t GC_min_stack_sz;
31 extern size_t GC_page_sz;
32 extern void GC_thr_init(void);
33
34# endif /* GC_SOLARIS_THREADS */
35
diff --git a/gc/include/private/specific.h b/gc/include/private/specific.h
deleted file mode 100644
index d04e19f5a4b..00000000000
--- a/gc/include/private/specific.h
+++ /dev/null
@@ -1,95 +0,0 @@
1/*
2 * This is a reimplementation of a subset of the pthread_getspecific/setspecific
3 * interface. This appears to outperform the standard linuxthreads one
4 * by a significant margin.
5 * The major restriction is that each thread may only make a single
6 * pthread_setspecific call on a single key. (The current data structure
7 * doesn't really require that. The restriction should be easily removable.)
8 * We don't currently support the destruction functions, though that
9 * could be done.
10 * We also currently assume that only one pthread_setspecific call
11 * can be executed at a time, though that assumption would be easy to remove
12 * by adding a lock.
13 */
14
15#include <errno.h>
16
17/* Called during key creation or setspecific. */
18/* For the GC we already hold lock. */
19/* Currently allocated objects leak on thread exit. */
20/* That's hard to fix, but OK if we allocate garbage */
21/* collected memory. */
22#define MALLOC_CLEAR(n) GC_INTERNAL_MALLOC(n, NORMAL)
23#define PREFIXED(name) GC_##name
24
25#define TS_CACHE_SIZE 1024
26#define CACHE_HASH(n) (((((long)n) >> 8) ^ (long)n) & (TS_CACHE_SIZE - 1))
27#define TS_HASH_SIZE 1024
28#define HASH(n) (((((long)n) >> 8) ^ (long)n) & (TS_HASH_SIZE - 1))
29
30/* An entry describing a thread-specific value for a given thread. */
31/* All such accessible structures preserve the invariant that if either */
32/* thread is a valid pthread id or qtid is a valid "quick tread id" */
33/* for a thread, then value holds the corresponding thread specific */
34/* value. This invariant must be preserved at ALL times, since */
35/* asynchronous reads are allowed. */
36typedef struct thread_specific_entry {
37 unsigned long qtid; /* quick thread id, only for cache */
38 void * value;
39 struct thread_specific_entry *next;
40 pthread_t thread;
41} tse;
42
43
44/* We represent each thread-specific datum as two tables. The first is */
45/* a cache, indexed by a "quick thread identifier". The "quick" thread */
46/* identifier is an easy to compute value, which is guaranteed to */
47/* determine the thread, though a thread may correspond to more than */
48/* one value. We typically use the address of a page in the stack. */
49/* The second is a hash table, indexed by pthread_self(). It is used */
50/* only as a backup. */
51
52/* Return the "quick thread id". Default version. Assumes page size, */
53/* or at least thread stack separation, is at least 4K. */
54/* Must be defined so that it never returns 0. (Page 0 can't really */
55/* be part of any stack, since that would make 0 a valid stack pointer.)*/
56static __inline__ unsigned long quick_thread_id() {
57 int dummy;
58 return (unsigned long)(&dummy) >> 12;
59}
60
61#define INVALID_QTID ((unsigned long)0)
62#define INVALID_THREADID ((pthread_t)0)
63
64typedef struct thread_specific_data {
65 tse * volatile cache[TS_CACHE_SIZE];
66 /* A faster index to the hash table */
67 tse * hash[TS_HASH_SIZE];
68 pthread_mutex_t lock;
69} tsd;
70
71typedef tsd * PREFIXED(key_t);
72
73extern int PREFIXED(key_create) (tsd ** key_ptr, void (* destructor)(void *));
74
75extern int PREFIXED(setspecific) (tsd * key, void * value);
76
77extern void PREFIXED(remove_specific) (tsd * key);
78
79/* An internal version of getspecific that assumes a cache miss. */
80void * PREFIXED(slow_getspecific) (tsd * key, unsigned long qtid,
81 tse * volatile * cache_entry);
82
83static __inline__ void * PREFIXED(getspecific) (tsd * key) {
84 long qtid = quick_thread_id();
85 unsigned hash_val = CACHE_HASH(qtid);
86 tse * volatile * entry_ptr = key -> cache + hash_val;
87 tse * entry = *entry_ptr; /* Must be loaded only once. */
88 if (EXPECT(entry -> qtid == qtid, 1)) {
89 GC_ASSERT(entry -> thread == pthread_self());
90 return entry -> value;
91 }
92 return PREFIXED(slow_getspecific) (key, qtid, entry_ptr);
93}
94
95
diff --git a/gc/include/weakpointer.h b/gc/include/weakpointer.h
deleted file mode 100644
index 84906b00a68..00000000000
--- a/gc/include/weakpointer.h
+++ /dev/null
@@ -1,221 +0,0 @@
1#ifndef _weakpointer_h_
2#define _weakpointer_h_
3
4/****************************************************************************
5
6WeakPointer and CleanUp
7
8 Copyright (c) 1991 by Xerox Corporation. All rights reserved.
9
10 THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
11 OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
12
13 Permission is hereby granted to copy this code for any purpose,
14 provided the above notices are retained on all copies.
15
16 Last modified on Mon Jul 17 18:16:01 PDT 1995 by ellis
17
18****************************************************************************/
19
20/****************************************************************************
21
22WeakPointer
23
24A weak pointer is a pointer to a heap-allocated object that doesn't
25prevent the object from being garbage collected. Weak pointers can be
26used to track which objects haven't yet been reclaimed by the
27collector. A weak pointer is deactivated when the collector discovers
28its referent object is unreachable by normal pointers (reachability
29and deactivation are defined more precisely below). A deactivated weak
30pointer remains deactivated forever.
31
32****************************************************************************/
33
34
35template< class T > class WeakPointer {
36public:
37
38WeakPointer( T* t = 0 )
39 /* Constructs a weak pointer for *t. t may be null. It is an error
40 if t is non-null and *t is not a collected object. */
41 {impl = _WeakPointer_New( t );}
42
43T* Pointer()
44 /* wp.Pointer() returns a pointer to the referent object of wp or
45 null if wp has been deactivated (because its referent object
46 has been discovered unreachable by the collector). */
47 {return (T*) _WeakPointer_Pointer( this->impl );}
48
49int operator==( WeakPointer< T > wp2 )
50 /* Given weak pointers wp1 and wp2, if wp1 == wp2, then wp1 and
51 wp2 refer to the same object. If wp1 != wp2, then either wp1
52 and wp2 don't refer to the same object, or if they do, one or
53 both of them has been deactivated. (Note: If objects t1 and t2
54 are never made reachable by their clean-up functions, then
55 WeakPointer<T>(t1) == WeakPointer<T>(t2) if and only t1 == t2.) */
56 {return _WeakPointer_Equal( this->impl, wp2.impl );}
57
58int Hash()
59 /* Returns a hash code suitable for use by multiplicative- and
60 division-based hash tables. If wp1 == wp2, then wp1.Hash() ==
61 wp2.Hash(). */
62 {return _WeakPointer_Hash( this->impl );}
63
64private:
65void* impl;
66};
67
68/*****************************************************************************
69
70CleanUp
71
72A garbage-collected object can have an associated clean-up function
73that will be invoked some time after the collector discovers the
74object is unreachable via normal pointers. Clean-up functions can be
75used to release resources such as open-file handles or window handles
76when their containing objects become unreachable. If a C++ object has
77a non-empty explicit destructor (i.e. it contains programmer-written
78code), the destructor will be automatically registered as the object's
79initial clean-up function.
80
81There is no guarantee that the collector will detect every unreachable
82object (though it will find almost all of them). Clients should not
83rely on clean-up to cause some action to occur immediately -- clean-up
84is only a mechanism for improving resource usage.
85
86Every object with a clean-up function also has a clean-up queue. When
87the collector finds the object is unreachable, it enqueues it on its
88queue. The clean-up function is applied when the object is removed
89from the queue. By default, objects are enqueued on the garbage
90collector's queue, and the collector removes all objects from its
91queue after each collection. If a client supplies another queue for
92objects, it is his responsibility to remove objects (and cause their
93functions to be called) by polling it periodically.
94
95Clean-up queues allow clean-up functions accessing global data to
96synchronize with the main program. Garbage collection can occur at any
97time, and clean-ups invoked by the collector might access data in an
98inconsistent state. A client can control this by defining an explicit
99queue for objects and polling it at safe points.
100
101The following definitions are used by the specification below:
102
103Given a pointer t to a collected object, the base object BO(t) is the
104value returned by new when it created the object. (Because of multiple
105inheritance, t and BO(t) may not be the same address.)
106
107A weak pointer wp references an object *t if BO(wp.Pointer()) ==
108BO(t).
109
110***************************************************************************/
111
112template< class T, class Data > class CleanUp {
113public:
114
115static void Set( T* t, void c( Data* d, T* t ), Data* d = 0 )
116 /* Sets the clean-up function of object BO(t) to be <c, d>,
117 replacing any previously defined clean-up function for BO(t); c
118 and d can be null, but t cannot. Sets the clean-up queue for
119 BO(t) to be the collector's queue. When t is removed from its
120 clean-up queue, its clean-up will be applied by calling c(d,
121 t). It is an error if *t is not a collected object. */
122 {_CleanUp_Set( t, c, d );}
123
124static void Call( T* t )
125 /* Sets the new clean-up function for BO(t) to be null and, if the
126 old one is non-null, calls it immediately, even if BO(t) is
127 still reachable. Deactivates any weak pointers to BO(t). */
128 {_CleanUp_Call( t );}
129
130class Queue {public:
131 Queue()
132 /* Constructs a new queue. */
133 {this->head = _CleanUp_Queue_NewHead();}
134
135 void Set( T* t )
136 /* q.Set(t) sets the clean-up queue of BO(t) to be q. */
137 {_CleanUp_Queue_Set( this->head, t );}
138
139 int Call()
140 /* If q is non-empty, q.Call() removes the first object and
141 calls its clean-up function; does nothing if q is
142 empty. Returns true if there are more objects in the
143 queue. */
144 {return _CleanUp_Queue_Call( this->head );}
145
146 private:
147 void* head;
148 };
149};
150
151/**********************************************************************
152
153Reachability and Clean-up
154
155An object O is reachable if it can be reached via a non-empty path of
156normal pointers from the registers, stacks, global variables, or an
157object with a non-null clean-up function (including O itself),
158ignoring pointers from an object to itself.
159
160This definition of reachability ensures that if object B is accessible
161from object A (and not vice versa) and if both A and B have clean-up
162functions, then A will always be cleaned up before B. Note that as
163long as an object with a clean-up function is contained in a cycle of
164pointers, it will always be reachable and will never be cleaned up or
165collected.
166
167When the collector finds an unreachable object with a null clean-up
168function, it atomically deactivates all weak pointers referencing the
169object and recycles its storage. If object B is accessible from object
170A via a path of normal pointers, A will be discovered unreachable no
171later than B, and a weak pointer to A will be deactivated no later
172than a weak pointer to B.
173
174When the collector finds an unreachable object with a non-null
175clean-up function, the collector atomically deactivates all weak
176pointers referencing the object, redefines its clean-up function to be
177null, and enqueues it on its clean-up queue. The object then becomes
178reachable again and remains reachable at least until its clean-up
179function executes.
180
181The clean-up function is assured that its argument is the only
182accessible pointer to the object. Nothing prevents the function from
183redefining the object's clean-up function or making the object
184reachable again (for example, by storing the pointer in a global
185variable).
186
187If the clean-up function does not make its object reachable again and
188does not redefine its clean-up function, then the object will be
189collected by a subsequent collection (because the object remains
190unreachable and now has a null clean-up function). If the clean-up
191function does make its object reachable again and a clean-up function
192is subsequently redefined for the object, then the new clean-up
193function will be invoked the next time the collector finds the object
194unreachable.
195
196Note that a destructor for a collected object cannot safely redefine a
197clean-up function for its object, since after the destructor executes,
198the object has been destroyed into "raw memory". (In most
199implementations, destroying an object mutates its vtbl.)
200
201Finally, note that calling delete t on a collected object first
202deactivates any weak pointers to t and then invokes its clean-up
203function (destructor).
204
205**********************************************************************/
206
207extern "C" {
208 void* _WeakPointer_New( void* t );
209 void* _WeakPointer_Pointer( void* wp );
210 int _WeakPointer_Equal( void* wp1, void* wp2 );
211 int _WeakPointer_Hash( void* wp );
212 void _CleanUp_Set( void* t, void (*c)( void* d, void* t ), void* d );
213 void _CleanUp_Call( void* t );
214 void* _CleanUp_Queue_NewHead ();
215 void _CleanUp_Queue_Set( void* h, void* t );
216 int _CleanUp_Queue_Call( void* h );
217}
218
219#endif /* _weakpointer_h_ */
220
221