aboutsummaryrefslogtreecommitdiffstats
path: root/mps/code
diff options
context:
space:
mode:
authorRichard Brooksby2012-09-06 17:17:18 +0100
committerRichard Brooksby2012-09-06 17:17:18 +0100
commit858e4ac0ac8ee684f48f0edd9d80ae28b17aee53 (patch)
tree5034519c869b370df2c87394c03f7f30e78945b9 /mps/code
parent383335816d888b5f28fe7b034106dc2056f56620 (diff)
downloademacs-858e4ac0ac8ee684f48f0edd9d80ae28b17aee53.tar.gz
emacs-858e4ac0ac8ee684f48f0edd9d80ae28b17aee53.zip
Partial merge of branch/2012-07-23/cet-transform, excluding cet-specific parts.
Copied from Perforce Change: 179309 ServerID: perforce.ravenbrook.com
Diffstat (limited to 'mps/code')
-rw-r--r--mps/code/comm.gmk5
-rw-r--r--mps/code/commpost.nmk2
-rw-r--r--mps/code/eventpro.c32
-rw-r--r--mps/code/fmtdytst.h9
-rw-r--r--mps/code/global.c50
-rw-r--r--mps/code/mpm.h15
-rw-r--r--mps/code/mpmst.h14
-rw-r--r--mps/code/mpmtypes.h5
-rw-r--r--mps/code/mps.c6
-rw-r--r--mps/code/mps.h7
-rw-r--r--mps/code/mpsi.c6
-rw-r--r--mps/code/pool.c3
-rw-r--r--mps/code/poolamc.c79
-rw-r--r--mps/code/protsgix.c2
-rw-r--r--mps/code/ss.c106
-rw-r--r--mps/code/ss.h30
-rw-r--r--mps/code/ssixi3.c10
-rw-r--r--mps/code/ssixi6.c16
-rwxr-xr-xmps/code/ssw3i3mv.c35
-rwxr-xr-xmps/code/ssw3i6mv.c (renamed from mps/code/ssw3mv.c)24
-rw-r--r--mps/code/table.c327
-rw-r--r--mps/code/table.h38
-rw-r--r--mps/code/testlib.c9
-rw-r--r--mps/code/testlib.h7
-rw-r--r--mps/code/trace.c291
-rw-r--r--mps/code/traceanc.c7
-rwxr-xr-xmps/code/w3build.bat1
-rw-r--r--mps/code/w3i3mv.nmk6
-rw-r--r--mps/code/w3i6mv.nmk4
-rw-r--r--mps/code/walk.c49
30 files changed, 802 insertions, 393 deletions
diff --git a/mps/code/comm.gmk b/mps/code/comm.gmk
index f67802daf56..3b775882bc3 100644
--- a/mps/code/comm.gmk
+++ b/mps/code/comm.gmk
@@ -150,7 +150,8 @@ MPMCOMMON = mpsi.c mpm.c arenavm.c arenacl.c arena.c global.c locus.c \
150 trace.c traceanc.c root.c seg.c format.c buffer.c ref.c \ 150 trace.c traceanc.c root.c seg.c format.c buffer.c ref.c \
151 bt.c ring.c shield.c ld.c event.c sac.c message.c \ 151 bt.c ring.c shield.c ld.c event.c sac.c message.c \
152 poolmrg.c poolmfs.c poolmv.c dbgpool.c dbgpooli.c \ 152 poolmrg.c poolmfs.c poolmv.c dbgpool.c dbgpooli.c \
153 boot.c meter.c splay.c cbs.c diag.c 153 boot.c meter.c splay.c cbs.c diag.c \
154 ss.c table.c
154MPM = $(MPMCOMMON) $(MPMPF) 155MPM = $(MPMCOMMON) $(MPMPF)
155 156
156 157
@@ -414,7 +415,7 @@ $(PFM)/$(VARIETY)/zmess: $(PFM)/$(VARIETY)/zmess.o \
414 $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a 415 $(FMTDYTSTOBJ) $(TESTLIBOBJ) $(PFM)/$(VARIETY)/mps.a
415 416
416$(PFM)/$(VARIETY)/eventcnv: $(PFM)/$(VARIETY)/eventcnv.o \ 417$(PFM)/$(VARIETY)/eventcnv: $(PFM)/$(VARIETY)/eventcnv.o \
417 $(PFM)/$(VARIETY)/eventpro.o $(PFM)/$(VARIETY)/table.o 418 $(PFM)/$(VARIETY)/eventpro.o $(PFM)/$(VARIETY)/mps.a
418 419
419$(PFM)/$(VARIETY)/replay: $(PFM)/$(VARIETY)/replay.o \ 420$(PFM)/$(VARIETY)/replay: $(PFM)/$(VARIETY)/replay.o \
420 $(PFM)/$(VARIETY)/eventrep.o \ 421 $(PFM)/$(VARIETY)/eventrep.o \
diff --git a/mps/code/commpost.nmk b/mps/code/commpost.nmk
index 63cd0fdd44f..483acb09710 100644
--- a/mps/code/commpost.nmk
+++ b/mps/code/commpost.nmk
@@ -232,7 +232,7 @@ $(PFM)\$(VARIETY)\zmess.exe: $(PFM)\$(VARIETY)\zmess.obj \
232 $(TESTLIBOBJ) 232 $(TESTLIBOBJ)
233 233
234$(PFM)\$(VARIETY)\eventcnv.exe: $(PFM)\$(VARIETY)\eventcnv.obj \ 234$(PFM)\$(VARIETY)\eventcnv.exe: $(PFM)\$(VARIETY)\eventcnv.obj \
235 $(PFM)\$(VARIETY)\eventpro.obj $(PFM)\$(VARIETY)\table.obj 235 $(PFM)\$(VARIETY)\eventpro.obj $(MPMOBJ) $(PLINTHOBJ)
236 236
237$(PFM)\$(VARIETY)\replay.exe: $(PFM)\$(VARIETY)\replay.obj \ 237$(PFM)\$(VARIETY)\replay.exe: $(PFM)\$(VARIETY)\replay.obj \
238 $(PFM)\$(VARIETY)\eventrep.obj \ 238 $(PFM)\$(VARIETY)\eventrep.obj \
diff --git a/mps/code/eventpro.c b/mps/code/eventpro.c
index 52d913a19dd..a1856eaee02 100644
--- a/mps/code/eventpro.c
+++ b/mps/code/eventpro.c
@@ -340,6 +340,19 @@ void EventDestroy(EventProc proc, Event event)
340 340
341/* EventProcCreate -- initialize the module */ 341/* EventProcCreate -- initialize the module */
342 342
343static void *tableAlloc(void *closure, size_t size)
344{
345 UNUSED(closure);
346 return malloc(size);
347}
348
349static void tableFree(void *closure, void *p, size_t size)
350{
351 UNUSED(closure);
352 UNUSED(size);
353 free(p);
354}
355
343Res EventProcCreate(EventProc *procReturn, 356Res EventProcCreate(EventProc *procReturn,
344 EventProcReader reader, 357 EventProcReader reader,
345 void *readerP) 358 void *readerP)
@@ -356,9 +369,14 @@ Res EventProcCreate(EventProc *procReturn,
356 assert(sizeof(Word) >= sizeof(Addr)); 369 assert(sizeof(Word) >= sizeof(Addr));
357 370
358 proc->reader = reader; proc->readerP = readerP; 371 proc->reader = reader; proc->readerP = readerP;
359 res = TableCreate(&proc->internTable, (size_t)1<<4); 372 res = TableCreate(&proc->internTable,
373 (size_t)1<<4,
374 tableAlloc, tableFree, NULL,
375 (Word)-1, (Word)-2); /* because MPS IDs are serials from zero up */
360 if (res != ResOK) goto failIntern; 376 if (res != ResOK) goto failIntern;
361 res = TableCreate(&proc->labelTable, (size_t)1<<7); 377 res = TableCreate(&proc->labelTable, (size_t)1<<7,
378 tableAlloc, tableFree, NULL,
379 0, 1); /* no Addrs down here */
362 if (res != ResOK) goto failLabel; 380 if (res != ResOK) goto failLabel;
363 proc->cachedEvent = NULL; 381 proc->cachedEvent = NULL;
364 *procReturn = proc; 382 *procReturn = proc;
@@ -374,23 +392,25 @@ failIntern:
374 392
375/* EventProcDestroy -- finish the module */ 393/* EventProcDestroy -- finish the module */
376 394
377static void deallocItem(Word key, void *value) 395static void deallocItem(void *closure, Word key, void *value)
378{ 396{
379 UNUSED(key); 397 UNUSED(key);
398 UNUSED(closure);
380 free(value); 399 free(value);
381} 400}
382 401
383static void deallocSym(Word key, void *value) 402static void deallocSym(void *closure, Word key, void *value)
384{ 403{
385 UNUSED(key); 404 UNUSED(key);
405 UNUSED(closure);
386 eventStringDestroy(((Symbol)value)->name); 406 eventStringDestroy(((Symbol)value)->name);
387 free(value); 407 free(value);
388} 408}
389 409
390void EventProcDestroy(EventProc proc) 410void EventProcDestroy(EventProc proc)
391{ 411{
392 TableMap(proc->labelTable, deallocItem); 412 TableMap(proc->labelTable, deallocItem, NULL);
393 TableMap(proc->internTable, deallocSym); 413 TableMap(proc->internTable, deallocSym, NULL);
394 TableDestroy(proc->labelTable); 414 TableDestroy(proc->labelTable);
395 TableDestroy(proc->internTable); 415 TableDestroy(proc->internTable);
396 if (proc->cachedEvent != NULL) 416 if (proc->cachedEvent != NULL)
diff --git a/mps/code/fmtdytst.h b/mps/code/fmtdytst.h
index 5aea06c7ee7..14f8993383d 100644
--- a/mps/code/fmtdytst.h
+++ b/mps/code/fmtdytst.h
@@ -8,6 +8,7 @@
8#define fmtdytst_h 8#define fmtdytst_h
9 9
10#include "mps.h" 10#include "mps.h"
11#include "testlib.h"
11 12
12extern mps_res_t dylan_init(mps_addr_t addr, size_t size, 13extern mps_res_t dylan_init(mps_addr_t addr, size_t size,
13 mps_addr_t *refs, size_t nr_refs); 14 mps_addr_t *refs, size_t nr_refs);
@@ -25,8 +26,16 @@ extern mps_res_t make_dylan_vector(mps_word_t *v, mps_ap_t ap, size_t slots);
25 26
26#define DYLAN_INT(n) (((mps_word_t)(n) << 2) | 1) 27#define DYLAN_INT(n) (((mps_word_t)(n) << 2) | 1)
27 28
29#define INT_DYI(n) ( (n) <= DYLAN_UINT_MAX ? DYLAN_INT(n) : (mps_word_t)fail() )
30
31
28#define DYLAN_INT_INT(d) ((mps_word_t)(d) >> 2) 32#define DYLAN_INT_INT(d) ((mps_word_t)(d) >> 2)
29 33
34#define DYI_INT(d) ( ((d) & 0x3) == 0x1 ? DYLAN_INT_INT(d) : (mps_word_t)fail() )
35
36#define DYLAN_UINT_MAX ((mps_word_t)-1 >> 2)
37#define DYLAN_UINT_MASK DYLAN_UINT_MAX
38
30#endif /* fmtdy_h */ 39#endif /* fmtdy_h */
31 40
32 41
diff --git a/mps/code/global.c b/mps/code/global.c
index 0dccbb5b914..43853745b58 100644
--- a/mps/code/global.c
+++ b/mps/code/global.c
@@ -212,6 +212,10 @@ Bool GlobalsCheck(Globals arenaGlobals)
212 CHECKL(BoolCheck(arenaRingInit)); 212 CHECKL(BoolCheck(arenaRingInit));
213 CHECKL(RingCheck(&arenaRing)); 213 CHECKL(RingCheck(&arenaRing));
214 214
215 CHECKL(BoolCheck(arena->emergency));
216
217 /* can't check arena->stackAtArenaEnter */
218
215 return TRUE; 219 return TRUE;
216} 220}
217 221
@@ -222,9 +226,8 @@ Res GlobalsInit(Globals arenaGlobals)
222{ 226{
223 Arena arena; 227 Arena arena;
224 Index i; 228 Index i;
225 TraceId ti;
226 Trace trace;
227 Rank rank; 229 Rank rank;
230 TraceId ti;
228 231
229 /* This is one of the first things that happens, */ 232 /* This is one of the first things that happens, */
230 /* so check static consistency here. */ 233 /* so check static consistency here. */
@@ -287,13 +290,15 @@ Res GlobalsInit(Globals arenaGlobals)
287 for(i = 0; i < ShieldCacheSIZE; i++) 290 for(i = 0; i < ShieldCacheSIZE; i++)
288 arena->shCache[i] = NULL; 291 arena->shCache[i] = NULL;
289 292
290 TRACE_SET_ITER(ti, trace, TraceSetUNIV, arena) 293 for (ti = 0; ti < TraceLIMIT; ++ti) {
291 /* <design/arena/#trace.invalid> */ 294 /* <design/arena/#trace.invalid> */
292 arena->trace[ti].sig = SigInvalid; 295 arena->trace[ti].sig = SigInvalid;
296 /* ti must be valid so that TraceSetIsMember etc. always work */
297 arena->trace[ti].ti = ti;
293 /* <design/message-gc/#lifecycle> */ 298 /* <design/message-gc/#lifecycle> */
294 arena->tsMessage[ti] = NULL; 299 arena->tsMessage[ti] = NULL;
295 arena->tMessage[ti] = NULL; 300 arena->tMessage[ti] = NULL;
296 TRACE_SET_ITER_END(ti, trace, TraceSetUNIV, arena); 301 }
297 302
298 for(rank = 0; rank < RankLIMIT; ++rank) 303 for(rank = 0; rank < RankLIMIT; ++rank)
299 RingInit(&arena->greyRing[rank]); 304 RingInit(&arena->greyRing[rank]);
@@ -305,6 +310,10 @@ Res GlobalsInit(Globals arenaGlobals)
305 for(i = 0; i < LDHistoryLENGTH; ++i) 310 for(i = 0; i < LDHistoryLENGTH; ++i)
306 arena->history[i] = RefSetEMPTY; 311 arena->history[i] = RefSetEMPTY;
307 312
313 arena->emergency = FALSE;
314
315 arena->stackAtArenaEnter = NULL;
316
308 arenaGlobals->sig = GlobalsSig; 317 arenaGlobals->sig = GlobalsSig;
309 AVERT(Globals, arenaGlobals); 318 AVERT(Globals, arenaGlobals);
310 return ResOK; 319 return ResOK;
@@ -1031,6 +1040,39 @@ Res GlobalsDescribe(Globals arenaGlobals, mps_lib_FILE *stream)
1031} 1040}
1032 1041
1033 1042
1043/* ArenaSetEmergency -- move the arena into emergency mode
1044 *
1045 * Emergency mode is set when garbage collection cannot make progress because
1046 * it can't allocate memory.
1047 *
1048 * Emergency mode affects the choice of PoolFixMethod in new ScanStates.
1049 * See ScanStateInit.
1050 *
1051 * If the traces aren't normal GC traces, and have their fix method
1052 * set to something other than PoolFix, then this won't affect the choice
1053 * of fix method in ScanStateInit and so won't have any effect. Whatever
1054 * caused the first failure will likely repeat.
1055 */
1056
1057void ArenaSetEmergency(Arena arena, Bool emergency)
1058{
1059 AVERT(Arena, arena);
1060 AVERT(Bool, emergency);
1061
1062 DIAG_SINGLEF(( "ArenaSetEmergency",
1063 "emergency: $U", (WriteFU)emergency, NULL ));
1064
1065 arena->emergency = emergency;
1066}
1067
1068Bool ArenaEmergency(Arena arena)
1069{
1070 AVERT(Arena, arena);
1071 return arena->emergency;
1072}
1073
1074
1075
1034/* C. COPYRIGHT AND LICENSE 1076/* C. COPYRIGHT AND LICENSE
1035 * 1077 *
1036 * Copyright (C) 2001-2003, 2008 Ravenbrook Limited <http://www.ravenbrook.com/>. 1078 * Copyright (C) 2001-2003, 2008 Ravenbrook Limited <http://www.ravenbrook.com/>.
diff --git a/mps/code/mpm.h b/mps/code/mpm.h
index f653e643d8a..f8f2e9e7466 100644
--- a/mps/code/mpm.h
+++ b/mps/code/mpm.h
@@ -210,7 +210,7 @@ extern Res PoolScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg);
210extern Res (PoolFix)(Pool pool, ScanState ss, Seg seg, Addr *refIO); 210extern Res (PoolFix)(Pool pool, ScanState ss, Seg seg, Addr *refIO);
211#define PoolFix(pool, ss, seg, refIO) \ 211#define PoolFix(pool, ss, seg, refIO) \
212 ((*(pool)->fix)(pool, ss, seg, refIO)) 212 ((*(pool)->fix)(pool, ss, seg, refIO))
213extern void PoolFixEmergency(Pool pool, ScanState ss, Seg seg, Addr *refIO); 213extern Res PoolFixEmergency(Pool pool, ScanState ss, Seg seg, Addr *refIO);
214extern void PoolReclaim(Pool pool, Trace trace, Seg seg); 214extern void PoolReclaim(Pool pool, Trace trace, Seg seg);
215extern void PoolTraceEnd(Pool pool, Trace trace); 215extern void PoolTraceEnd(Pool pool, Trace trace);
216extern void PoolWalk(Pool pool, Seg seg, FormattedObjectsStepMethod f, 216extern void PoolWalk(Pool pool, Seg seg, FormattedObjectsStepMethod f,
@@ -374,14 +374,11 @@ extern void TraceDestroy(Trace trace);
374 374
375extern Res TraceAddWhite(Trace trace, Seg seg); 375extern Res TraceAddWhite(Trace trace, Seg seg);
376extern Res TraceCondemnZones(Trace trace, ZoneSet condemnedSet); 376extern Res TraceCondemnZones(Trace trace, ZoneSet condemnedSet);
377extern void TraceStart(Trace trace, double mortality, 377extern Res TraceStart(Trace trace, double mortality, double finishingTime);
378 double finishingTime);
379extern Size TracePoll(Globals globals); 378extern Size TracePoll(Globals globals);
380 379
381extern Rank TraceRankForAccess(Arena arena, Seg seg); 380extern Rank TraceRankForAccess(Arena arena, Seg seg);
382extern void TraceSegAccess(Arena arena, Seg seg, AccessSet mode); 381extern void TraceSegAccess(Arena arena, Seg seg, AccessSet mode);
383extern Res TraceFix(ScanState ss, Ref *refIO);
384extern Res TraceFixEmergency(ScanState ss, Ref *refIO);
385 382
386extern void TraceQuantum(Trace trace); 383extern void TraceQuantum(Trace trace);
387extern Res TraceStartCollectAll(Trace *traceReturn, Arena arena, int why); 384extern Res TraceStartCollectAll(Trace *traceReturn, Arena arena, int why);
@@ -421,12 +418,11 @@ extern double TraceWorkFactor;
421#define TRACE_FIX1(ss, ref) \ 418#define TRACE_FIX1(ss, ref) \
422 (SCANt = (Word)1 << ((Word)(ref) >> SCANzoneShift & (MPS_WORD_WIDTH-1)), \ 419 (SCANt = (Word)1 << ((Word)(ref) >> SCANzoneShift & (MPS_WORD_WIDTH-1)), \
423 SCANsummary |= SCANt, \ 420 SCANsummary |= SCANt, \
424 SCANwhite & SCANt) 421 (SCANwhite & SCANt) != 0)
425 422
426/* Equivalent to <code/mps.h> MPS_FIX2 */ 423/* Equivalent to <code/mps.h> MPS_FIX2 */
427 424
428#define TRACE_FIX2(ss, refIO) \ 425#define TRACE_FIX2(ss, refIO) mps_fix2((mps_ss_t)(ss), (mps_addr_t *)(refIO))
429 ((*(ss)->fix)(ss, refIO))
430 426
431/* Equivalent to <code/mps.h> MPS_FIX */ 427/* Equivalent to <code/mps.h> MPS_FIX */
432 428
@@ -522,6 +518,9 @@ extern Res ArenaStartCollect(Globals globals, int why);
522extern Res ArenaCollect(Globals globals, int why); 518extern Res ArenaCollect(Globals globals, int why);
523extern Bool ArenaHasAddr(Arena arena, Addr addr); 519extern Bool ArenaHasAddr(Arena arena, Addr addr);
524 520
521extern void ArenaSetEmergency(Arena arena, Bool emergency);
522extern Bool ArenaEmergency(Arena arean);
523
525extern Res ControlInit(Arena arena); 524extern Res ControlInit(Arena arena);
526extern void ControlFinish(Arena arena); 525extern void ControlFinish(Arena arena);
527extern Res ControlAlloc(void **baseReturn, Arena arena, size_t size, 526extern Res ControlAlloc(void **baseReturn, Arena arena, size_t size,
diff --git a/mps/code/mpmst.h b/mps/code/mpmst.h
index 0903e783ba0..8807d086b66 100644
--- a/mps/code/mpmst.h
+++ b/mps/code/mpmst.h
@@ -453,9 +453,8 @@ typedef struct LDStruct {
453 * 453 *
454 * .ss: See <code/trace.c>. 454 * .ss: See <code/trace.c>.
455 * 455 *
456 * .ss: The first four fields of the trace structure must match the 456 * .ss: The first three fields of the trace structure must match the
457 * external scan state structure (mps_ss_s) thus: 457 * external scan state structure (mps_ss_s) thus:
458 * ss->fix mps_ss->fix
459 * ss->zoneShift mps_ss->w0 458 * ss->zoneShift mps_ss->w0
460 * ss->white mps_ss->w1 459 * ss->white mps_ss->w1
461 * ss->unfixedSummary mps_ss->w2 460 * ss->unfixedSummary mps_ss->w2
@@ -466,12 +465,13 @@ typedef struct LDStruct {
466#define ScanStateSig ((Sig)0x5195CA45) /* SIGnature SCAN State */ 465#define ScanStateSig ((Sig)0x5195CA45) /* SIGnature SCAN State */
467 466
468typedef struct ScanStateStruct { 467typedef struct ScanStateStruct {
469 TraceFixMethod fix; /* fix function */
470 Word zoneShift; /* copy of arena->zoneShift. See .ss.zone */ 468 Word zoneShift; /* copy of arena->zoneShift. See .ss.zone */
471 ZoneSet white; /* white set, for inline fix test */ 469 ZoneSet white; /* white set, for inline fix test */
472 RefSet unfixedSummary; /* accumulated summary of scanned references */ 470 RefSet unfixedSummary; /* accumulated summary of scanned references */
473 Sig sig; /* <design/sig/> */ 471 Sig sig; /* <design/sig/> */
474 Arena arena; /* owning arena */ 472 Arena arena; /* owning arena */
473 PoolFixMethod fix; /* third stage fix function */
474 void *fixClosure; /* closure data for fix */
475 TraceSet traces; /* traces to scan for */ 475 TraceSet traces; /* traces to scan for */
476 Rank rank; /* reference rank of scanning */ 476 Rank rank; /* reference rank of scanning */
477 Bool wasMarked; /* design.mps.fix.protocol.was-ready */ 477 Bool wasMarked; /* design.mps.fix.protocol.was-ready */
@@ -505,7 +505,8 @@ typedef struct TraceStruct {
505 TraceState state; /* current state of trace */ 505 TraceState state; /* current state of trace */
506 Rank band; /* current band */ 506 Rank band; /* current band */
507 Bool firstStretch; /* in first stretch of band (see accessor) */ 507 Bool firstStretch; /* in first stretch of band (see accessor) */
508 Bool emergency; /* ran out of memory during trace */ 508 PoolFixMethod fix; /* fix method to apply to references */
509 void *fixClosure; /* closure information for fix method */
509 Chain chain; /* chain being incrementally collected */ 510 Chain chain; /* chain being incrementally collected */
510 STATISTIC_DECL(Size preTraceArenaReserved); /* ArenaReserved before this trace */ 511 STATISTIC_DECL(Size preTraceArenaReserved); /* ArenaReserved before this trace */
511 Size condemned; /* condemned bytes */ 512 Size condemned; /* condemned bytes */
@@ -712,6 +713,11 @@ typedef struct ArenaStruct {
712 RefSet prehistory; /* <design/arena/#ld.prehistory> */ 713 RefSet prehistory; /* <design/arena/#ld.prehistory> */
713 RefSet history[LDHistoryLENGTH]; /* <design/arena/#ld.history> */ 714 RefSet history[LDHistoryLENGTH]; /* <design/arena/#ld.history> */
714 715
716 Bool emergency; /* garbage collect in emergency mode? */
717
718 Addr *stackAtArenaEnter; /* NULL or top of client stack, in the thread */
719 /* that then entered the MPS. */
720
715 Sig sig; 721 Sig sig;
716} ArenaStruct; 722} ArenaStruct;
717 723
diff --git a/mps/code/mpmtypes.h b/mps/code/mpmtypes.h
index d4f07cb99f4..66d5b533546 100644
--- a/mps/code/mpmtypes.h
+++ b/mps/code/mpmtypes.h
@@ -411,6 +411,8 @@ enum {
411/* TraceStart reasons: the trigger that caused a trace to start. */ 411/* TraceStart reasons: the trigger that caused a trace to start. */
412/* Make these specific trigger names, not broad categories; */ 412/* Make these specific trigger names, not broad categories; */
413/* and if a new trigger is added, add a new reason. */ 413/* and if a new trigger is added, add a new reason. */
414/* TODO: A better way for MPS extensions to extend the list of reasons
415 instead of the catch-all TraceStartWhyEXTENSION. */
414 416
415enum { 417enum {
416 TraceStartWhyBASE = 1, /* not a reason, the base of the enum. */ 418 TraceStartWhyBASE = 1, /* not a reason, the base of the enum. */
@@ -419,7 +421,8 @@ enum {
419 TraceStartWhyOPPORTUNISM, /* start full */ 421 TraceStartWhyOPPORTUNISM, /* start full */
420 TraceStartWhyCLIENTFULL_INCREMENTAL, /* start full */ 422 TraceStartWhyCLIENTFULL_INCREMENTAL, /* start full */
421 TraceStartWhyCLIENTFULL_BLOCK, /* do full */ 423 TraceStartWhyCLIENTFULL_BLOCK, /* do full */
422 TraceStartWhyWALK, 424 TraceStartWhyWALK, /* walking references -- see walk.c */
425 TraceStartWhyEXTENSION, /* MPS extension using traces */
423 TraceStartWhyLIMIT /* not a reason, the limit of the enum. */ 426 TraceStartWhyLIMIT /* not a reason, the limit of the enum. */
424}; 427};
425 428
diff --git a/mps/code/mps.c b/mps/code/mps.c
index 8adcf146226..1d1d93100d0 100644
--- a/mps/code/mps.c
+++ b/mps/code/mps.c
@@ -67,7 +67,9 @@
67#include "splay.c" 67#include "splay.c"
68#include "cbs.c" 68#include "cbs.c"
69#include "diag.c" 69#include "diag.c"
70#include "ss.c"
70#include "version.c" 71#include "version.c"
72#include "table.c"
71 73
72/* Additional pool classes */ 74/* Additional pool classes */
73 75
@@ -184,7 +186,7 @@
184#include "protw3.c" /* Windows protection */ 186#include "protw3.c" /* Windows protection */
185#include "proti3.c" /* 32-bit Intel mutator context decoding */ 187#include "proti3.c" /* 32-bit Intel mutator context decoding */
186#include "prmci3w3.c" /* Windows on 32-bit Intel mutator context */ 188#include "prmci3w3.c" /* Windows on 32-bit Intel mutator context */
187#include "ssw3mv.c" /* Windows stack scan for Microsoft C */ 189#include "ssw3i3mv.c" /* Windows on 32-bit stack scan for Microsoft C */
188#include "spi3.c" /* Intel stack probe */ 190#include "spi3.c" /* Intel stack probe */
189#include "mpsiw3.c" /* Windows interface layer extras */ 191#include "mpsiw3.c" /* Windows interface layer extras */
190 192
@@ -200,7 +202,7 @@
200#include "protw3.c" /* Windows protection */ 202#include "protw3.c" /* Windows protection */
201#include "proti6.c" /* 64-bit Intel mutator context decoding */ 203#include "proti6.c" /* 64-bit Intel mutator context decoding */
202#include "prmci6w3.c" /* Windows on 64-bit Intel mutator context */ 204#include "prmci6w3.c" /* Windows on 64-bit Intel mutator context */
203#include "ssw3mv.c" /* Windows stack scan for Microsoft C */ 205#include "ssw3i6mv.c" /* Windows on 64-bit stack scan for Microsoft C */
204#include "span.c" /* generic stack probe FIXME: Is this correct? */ 206#include "span.c" /* generic stack probe FIXME: Is this correct? */
205#include "mpsiw3.c" /* Windows interface layer extras */ 207#include "mpsiw3.c" /* Windows interface layer extras */
206 208
diff --git a/mps/code/mps.h b/mps/code/mps.h
index 633dbb5ddac..8971c3a4f2d 100644
--- a/mps/code/mps.h
+++ b/mps/code/mps.h
@@ -189,7 +189,6 @@ typedef mps_addr_t (*mps_fmt_class_t)(mps_addr_t);
189/* .ss: See also <code/mpsi.c#check.ss> and <code/mpmst.h#ss>. */ 189/* .ss: See also <code/mpsi.c#check.ss> and <code/mpmst.h#ss>. */
190 190
191typedef struct mps_ss_s { 191typedef struct mps_ss_s {
192 mps_res_t (*fix)(mps_ss_t, mps_addr_t *);
193 mps_word_t w0, w1, w2; 192 mps_word_t w0, w1, w2;
194} mps_ss_s; 193} mps_ss_s;
195 194
@@ -640,10 +639,10 @@ extern mps_res_t mps_fix(mps_ss_t, mps_addr_t *);
640 (_mps_wt = (mps_word_t)1 << ((mps_word_t)(ref) >> _mps_w0 \ 639 (_mps_wt = (mps_word_t)1 << ((mps_word_t)(ref) >> _mps_w0 \
641 & (sizeof(mps_word_t) * CHAR_BIT - 1)), \ 640 & (sizeof(mps_word_t) * CHAR_BIT - 1)), \
642 _mps_w2 |= _mps_wt, \ 641 _mps_w2 |= _mps_wt, \
643 _mps_w1 & _mps_wt) 642 (_mps_w1 & _mps_wt) != 0)
644 643
645#define MPS_FIX2(ss, ref_io) \ 644extern mps_res_t mps_fix2(mps_ss_t, mps_addr_t *);
646 ((*(ss)->fix)(ss, ref_io)) 645#define MPS_FIX2(ss, ref_io) mps_fix2(ss, ref_io)
647 646
648#define MPS_FIX12(ss, ref_io) \ 647#define MPS_FIX12(ss, ref_io) \
649 (MPS_FIX1(ss, *(ref_io)) ? \ 648 (MPS_FIX1(ss, *(ref_io)) ? \
diff --git a/mps/code/mpsi.c b/mps/code/mpsi.c
index c97826e7c98..528ffebea3e 100644
--- a/mps/code/mpsi.c
+++ b/mps/code/mpsi.c
@@ -160,11 +160,7 @@ static Bool mpsi_check(void)
160 /* Check ss_s/ScanStateStruct compatibility by hand */ 160 /* Check ss_s/ScanStateStruct compatibility by hand */
161 /* .check.ss: See <code/mps.h#ss> and <code/mpmst.h#ss>. */ 161 /* .check.ss: See <code/mps.h#ss> and <code/mpmst.h#ss>. */
162 /* Note that the size of the mps_ss_s and ScanStateStruct */ 162 /* Note that the size of the mps_ss_s and ScanStateStruct */
163 /* are not equal. See <code/mpmst.h#ss>. COMPATFIELDAPPROX */ 163 /* are not equal. See <code/mpmst.h#ss>. */
164 /* is used on the fix field because its type is punned and */
165 /* therefore isn't exactly checkable. See */
166 /* <design/interface-c/#pun.addr>. */
167 CHECKL(COMPATFIELDAPPROX(mps_ss_s, fix, ScanStateStruct, fix));
168 CHECKL(COMPATFIELD(mps_ss_s, w0, ScanStateStruct, zoneShift)); 164 CHECKL(COMPATFIELD(mps_ss_s, w0, ScanStateStruct, zoneShift));
169 CHECKL(COMPATFIELD(mps_ss_s, w1, ScanStateStruct, white)); 165 CHECKL(COMPATFIELD(mps_ss_s, w1, ScanStateStruct, white));
170 CHECKL(COMPATFIELD(mps_ss_s, w2, ScanStateStruct, unfixedSummary)); 166 CHECKL(COMPATFIELD(mps_ss_s, w2, ScanStateStruct, unfixedSummary));
diff --git a/mps/code/pool.c b/mps/code/pool.c
index 0635bf012b3..a88cf48e97c 100644
--- a/mps/code/pool.c
+++ b/mps/code/pool.c
@@ -425,7 +425,7 @@ Res (PoolFix)(Pool pool, ScanState ss, Seg seg, Addr *refIO)
425 return PoolFix(pool, ss, seg, refIO); 425 return PoolFix(pool, ss, seg, refIO);
426} 426}
427 427
428void PoolFixEmergency(Pool pool, ScanState ss, Seg seg, Addr *refIO) 428Res PoolFixEmergency(Pool pool, ScanState ss, Seg seg, Addr *refIO)
429{ 429{
430 Res res; 430 Res res;
431 431
@@ -440,6 +440,7 @@ void PoolFixEmergency(Pool pool, ScanState ss, Seg seg, Addr *refIO)
440 440
441 res = (pool->class->fixEmergency)(pool, ss, seg, refIO); 441 res = (pool->class->fixEmergency)(pool, ss, seg, refIO);
442 AVER(res == ResOK); 442 AVER(res == ResOK);
443 return res;
443} 444}
444 445
445 446
diff --git a/mps/code/poolamc.c b/mps/code/poolamc.c
index 8c9934d43ca..713a627dd08 100644
--- a/mps/code/poolamc.c
+++ b/mps/code/poolamc.c
@@ -1520,55 +1520,41 @@ static Res amcScanNailed(Bool *totalReturn, ScanState ss, Pool pool,
1520 } while(moreScanning); 1520 } while(moreScanning);
1521 1521
1522 if(loops > 1) { 1522 if(loops > 1) {
1523 { 1523 RefSet refset;
1524 /* Looped: should only happen under emergency tracing. */ 1524
1525 TraceId ti; 1525 AVER(ArenaEmergency(PoolArena(pool)));
1526 Trace trace; 1526
1527 Bool emerg = FALSE; 1527 /* Looped: fixed refs (from 1st pass) were seen by MPS_FIX1
1528 TraceSet ts = ss->traces; 1528 * (in later passes), so the "ss.unfixedSummary" is _not_
1529 Arena arena = pool->arena; 1529 * purely unfixed. In this one case, unfixedSummary is not
1530 1530 * accurate, and cannot be used to verify the SegSummary (see
1531 TRACE_SET_ITER(ti, trace, ts, arena) 1531 * impl/trace/#verify.segsummary). Use ScanStateSetSummary to
1532 if(trace->emergency) { 1532 * store ScanStateSummary in ss.fixedSummary and reset
1533 emerg = TRUE; 1533 * ss.unfixedSummary. See job001548.
1534 } 1534 */
1535 TRACE_SET_ITER_END(ti, trace, ts, arena); 1535
1536 AVER(emerg); 1536 refset = ScanStateSummary(ss);
1537 }
1538 {
1539 /* Looped: fixed refs (from 1st pass) were seen by MPS_FIX1
1540 * (in later passes), so the "ss.unfixedSummary" is _not_
1541 * purely unfixed. In this one case, unfixedSummary is not
1542 * accurate, and cannot be used to verify the SegSummary (see
1543 * impl/trace/#verify.segsummary). Use ScanStateSetSummary to
1544 * store ScanStateSummary in ss.fixedSummary and reset
1545 * ss.unfixedSummary. See job001548.
1546 */
1547 RefSet refset;
1548
1549 refset = ScanStateSummary(ss);
1550 1537
1551#if 1 1538#if 1
1552 /* A rare event, which might prompt a rare defect to appear. */ 1539 /* A rare event, which might prompt a rare defect to appear. */
1553 DIAG_SINGLEF(( "amcScanNailed_loop", 1540 DIAG_SINGLEF(( "amcScanNailed_loop",
1554 "scan completed, but had to loop $U times:\n", (WriteFU)loops, 1541 "scan completed, but had to loop $U times:\n", (WriteFU)loops,
1555 " SegSummary: $B\n", (WriteFB)SegSummary(seg), 1542 " SegSummary: $B\n", (WriteFB)SegSummary(seg),
1556 " ss.white: $B\n", (WriteFB)ss->white, 1543 " ss.white: $B\n", (WriteFB)ss->white,
1557 " ss.unfixedSummary: $B", (WriteFB)ss->unfixedSummary, 1544 " ss.unfixedSummary: $B", (WriteFB)ss->unfixedSummary,
1558 "$S\n", (WriteFS)( 1545 "$S\n", (WriteFS)(
1559 (RefSetSub(ss->unfixedSummary, SegSummary(seg))) 1546 (RefSetSub(ss->unfixedSummary, SegSummary(seg)))
1560 ? "" 1547 ? ""
1561 : " <=== This would have failed .verify.segsummary!" 1548 : " <=== This would have failed .verify.segsummary!"
1562 ), 1549 ),
1563 " ss.fixedSummary: $B\n", (WriteFB)ss->fixedSummary, 1550 " ss.fixedSummary: $B\n", (WriteFB)ss->fixedSummary,
1564 "ScanStateSummary: $B\n", (WriteFB)refset, 1551 "ScanStateSummary: $B\n", (WriteFB)refset,
1565 "MOVING ScanStateSummary TO fixedSummary, " 1552 "MOVING ScanStateSummary TO fixedSummary, "
1566 "RESETTING unfixedSummary.\n", NULL 1553 "RESETTING unfixedSummary.\n", NULL
1567 )); 1554 ));
1568#endif 1555#endif
1569 1556
1570 ScanStateSetSummary(ss, refset); 1557 ScanStateSetSummary(ss, refset);
1571 }
1572 } 1558 }
1573 1559
1574 *totalReturn = total; 1560 *totalReturn = total;
@@ -2219,7 +2205,6 @@ static void AMCTraceEnd(Pool pool, Trace trace)
2219 DIAG_SINGLEF(( "AMCTraceEnd_pageret", 2205 DIAG_SINGLEF(( "AMCTraceEnd_pageret",
2220 " $U", (WriteFU)ArenaEpoch(pool->arena), 2206 " $U", (WriteFU)ArenaEpoch(pool->arena),
2221 " $U", (WriteFU)trace->why, 2207 " $U", (WriteFU)trace->why,
2222 " $S", (WriteFS)(trace->emergency ? "Emergency" : "-"),
2223 " $U", (WriteFU)amc->pageretstruct[ti].pCond, 2208 " $U", (WriteFU)amc->pageretstruct[ti].pCond,
2224 " $U", (WriteFU)amc->pageretstruct[ti].pRet, ",", 2209 " $U", (WriteFU)amc->pageretstruct[ti].pRet, ",",
2225 " $U", (WriteFU)amc->pageretstruct[ti].pCS, 2210 " $U", (WriteFU)amc->pageretstruct[ti].pCS,
diff --git a/mps/code/protsgix.c b/mps/code/protsgix.c
index 80a2f94dff0..9552d18eeca 100644
--- a/mps/code/protsgix.c
+++ b/mps/code/protsgix.c
@@ -78,6 +78,8 @@ static void sigHandle(int sig, siginfo_t *info, void *context) /* .sigh.args */
78 /* sigset renamed to asigset due to clash with global on Darwin. */ 78 /* sigset renamed to asigset due to clash with global on Darwin. */
79 sigset_t asigset, oldset; 79 sigset_t asigset, oldset;
80 struct sigaction sa; 80 struct sigaction sa;
81
82 UNUSED(context);
81 83
82 UNUSED(context); 84 UNUSED(context);
83 AVER(sig == PROT_SIGNAL); 85 AVER(sig == PROT_SIGNAL);
diff --git a/mps/code/ss.c b/mps/code/ss.c
new file mode 100644
index 00000000000..084682b2a23
--- /dev/null
+++ b/mps/code/ss.c
@@ -0,0 +1,106 @@
1/* ss.c: STACK SCANNING
2 *
3 * $Id$
4 * Copyright (c) 2001 Ravenbrook Limited. See end of file for license.
5 *
6 * This is part of the code that scans the stack and fixes the registers
7 * that may contain roots. See <design/thread-manager/>
8 *
9 * Each platform ABI has a set of callee-save registers that may still
10 * contain roots. The StackScan function is defined for each ABI in source
11 * files like ss*.c and ss*.asm. That function saves the callee save
12 * registers in its frame, then calls StackScanInner to do the scanning.
13 */
14
15#include "mpm.h"
16
17SRCID(ss, "$Id$");
18
19
20/* StackScanInner -- carry out stack scanning
21 *
22 * This function should be called by StackScan once it has saved the
23 * callee-save registers for the platform ABI in order to do the actual
24 * scanning.
25 */
26
27Res StackScanInner(ScanState ss,
28 Addr *stackBot,
29 Addr *stackTop,
30 Count nSavedRegs)
31{
32 Arena arena;
33 Res res;
34
35 AVERT(ScanState, ss);
36 AVER(stackTop < stackBot);
37 AVER(AddrIsAligned((Addr)stackTop, sizeof(Addr))); /* .assume.align */
38 AVER(0 < nSavedRegs && nSavedRegs < 128); /* sanity check */
39
40 arena = ss->arena;
41
42 /* If a stack pointer was stored when we entered the arena (through the
43 MPS interface in mpsi*.c) then we scan just the saved registers and
44 the stack starting there, in order to avoid false ambiguous references
45 in the MPS stack. This is particularly important for transforms
46 (trans.c). Otherwise, scan the whole stack. */
47
48 if (arena->stackAtArenaEnter != NULL) {
49 AVER(stackTop < arena->stackAtArenaEnter);
50 AVER(arena->stackAtArenaEnter < stackBot);
51 res = TraceScanAreaTagged(ss, stackTop, stackTop + nSavedRegs);
52 if (res != ResOK)
53 return res;
54 res = TraceScanAreaTagged(ss, arena->stackAtArenaEnter, stackBot);
55 if (res != ResOK)
56 return res;
57 } else {
58 res = TraceScanAreaTagged(ss, stackTop, stackBot);
59 if (res != ResOK)
60 return res;
61 }
62
63 return ResOK;
64}
65
66
67/* C. COPYRIGHT AND LICENSE
68 *
69 * Copyright (C) 2001-2002 Ravenbrook Limited <http://www.ravenbrook.com/>.
70 * All rights reserved. This is an open source license. Contact
71 * Ravenbrook for commercial licensing options.
72 *
73 * Redistribution and use in source and binary forms, with or without
74 * modification, are permitted provided that the following conditions are
75 * met:
76 *
77 * 1. Redistributions of source code must retain the above copyright
78 * notice, this list of conditions and the following disclaimer.
79 *
80 * 2. Redistributions in binary form must reproduce the above copyright
81 * notice, this list of conditions and the following disclaimer in the
82 * documentation and/or other materials provided with the distribution.
83 *
84 * 3. Redistributions in any form must be accompanied by information on how
85 * to obtain complete source code for this software and any accompanying
86 * software that uses this software. The source code must either be
87 * included in the distribution or be available for no more than the cost
88 * of distribution plus a nominal fee, and must be freely redistributable
89 * under reasonable conditions. For an executable file, complete source
90 * code means the source code for all modules it contains. It does not
91 * include source code for modules or files that typically accompany the
92 * major components of the operating system on which the executable file
93 * runs.
94 *
95 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
96 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
97 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
98 * PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL THE
99 * COPYRIGHT HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
100 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
101 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
102 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
103 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
104 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
105 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
106 */
diff --git a/mps/code/ss.h b/mps/code/ss.h
index 14c0fd19d70..831ad98aea8 100644
--- a/mps/code/ss.h
+++ b/mps/code/ss.h
@@ -12,24 +12,34 @@
12#include "mpm.h" 12#include "mpm.h"
13 13
14 14
15/* == StackScan == 15/* StackScan -- scan the current thread's stack
16 * 16 *
17 * StackScan scans the current stack between the 17 * StackScan scans the stack of the current thread, Between stackBot and the
18 * stackBot and the current top of stack. It also fixes 18 * current top of stack. It also fixes any roots which may be in callee-save
19 * any roots which may be in registers. 19 * registers.
20 * 20 *
21 * See the specific implementation for the exact registers which 21 * See the specific implementation for the exact registers which are scanned.
22 * are scanned.
23 * 22 *
24 * The word pointed to by stackBot is fixed if the stack 23 * If a stack pointer has been stashed at arena entry (through the MPS
25 * is by convention empty, and not fixed if it is full. 24 * interface in mpsi*.c) then only the registers and the stack between
26 * Where empty means sp points to first free word beyond the top of 25 * stackAtArenaEnter and stackBot is scanned, to avoid scanning false
27 * stack. Full means sp points to the top of stack itself. 26 * ambiguous references on the MPS's own stack. This is particularly
27 * important for transforms (trans.c).
28 *
29 * The word pointed to by stackBot is fixed if the stack is by convention
30 * empty, and not fixed if it is full. Where empty means sp points to first
31 * free word beyond the top of stack. Full means sp points to the top of
32 * stack itself.
28 */ 33 */
29 34
30extern Res StackScan(ScanState ss, Addr *stackBot); 35extern Res StackScan(ScanState ss, Addr *stackBot);
31 36
32 37
38extern Res StackScanInner(ScanState ss,
39 Addr *stackBot,
40 Addr *stackTop,
41 Count nSavedRegs);
42
33#endif /* ss_h */ 43#endif /* ss_h */
34 44
35 45
diff --git a/mps/code/ssixi3.c b/mps/code/ssixi3.c
index 04601803438..8cc1f8cbd45 100644
--- a/mps/code/ssixi3.c
+++ b/mps/code/ssixi3.c
@@ -52,8 +52,6 @@ SRCID(ssixi3, "$Id$");
52Res StackScan(ScanState ss, Addr *stackBot) 52Res StackScan(ScanState ss, Addr *stackBot)
53{ 53{
54 Addr calleeSaveRegs[4]; 54 Addr calleeSaveRegs[4];
55 Addr *stackTop;
56 Res res;
57 55
58 /* .assume.asm.stack */ 56 /* .assume.asm.stack */
59 /* Store the callee save registers on the stack so they get scanned 57 /* Store the callee save registers on the stack so they get scanned
@@ -63,12 +61,8 @@ Res StackScan(ScanState ss, Addr *stackBot)
63 ASMV("mov %%esi, %0" : "=m" (calleeSaveRegs[1])); 61 ASMV("mov %%esi, %0" : "=m" (calleeSaveRegs[1]));
64 ASMV("mov %%edi, %0" : "=m" (calleeSaveRegs[2])); 62 ASMV("mov %%edi, %0" : "=m" (calleeSaveRegs[2]));
65 ASMV("mov %%ebp, %0" : "=m" (calleeSaveRegs[3])); 63 ASMV("mov %%ebp, %0" : "=m" (calleeSaveRegs[3]));
66 ASMV("mov %%esp, %0" : "=r" (stackTop) :); /* stackTop = esp */ 64
67 65 return StackScanInner(ss, stackBot, calleeSaveRegs, NELEMS(calleeSaveRegs));
68 AVER(AddrIsAligned((Addr)stackTop, sizeof(Addr))); /* .assume.align */
69 res = TraceScanAreaTagged(ss, stackTop, stackBot);
70
71 return res;
72} 66}
73 67
74 68
diff --git a/mps/code/ssixi6.c b/mps/code/ssixi6.c
index 6eeac9ae45b..70954edb5da 100644
--- a/mps/code/ssixi6.c
+++ b/mps/code/ssixi6.c
@@ -50,21 +50,19 @@ SRCID(ssixi6, "$Id$");
50Res StackScan(ScanState ss, Addr *stackBot) 50Res StackScan(ScanState ss, Addr *stackBot)
51{ 51{
52 Addr calleeSaveRegs[6]; 52 Addr calleeSaveRegs[6];
53 Addr *stackTop; 53
54 Res res; 54 /* .assume.asm.stack */
55 55 /* Store the callee save registers on the stack so they get scanned
56 * as they may contain roots.
57 */
56 ASMV("mov %%rbp, %0" : "=m" (calleeSaveRegs[0])); 58 ASMV("mov %%rbp, %0" : "=m" (calleeSaveRegs[0]));
57 ASMV("mov %%rbx, %0" : "=m" (calleeSaveRegs[1])); 59 ASMV("mov %%rbx, %0" : "=m" (calleeSaveRegs[1]));
58 ASMV("mov %%r12, %0" : "=m" (calleeSaveRegs[2])); 60 ASMV("mov %%r12, %0" : "=m" (calleeSaveRegs[2]));
59 ASMV("mov %%r13, %0" : "=m" (calleeSaveRegs[3])); 61 ASMV("mov %%r13, %0" : "=m" (calleeSaveRegs[3]));
60 ASMV("mov %%r14, %0" : "=m" (calleeSaveRegs[4])); 62 ASMV("mov %%r14, %0" : "=m" (calleeSaveRegs[4]));
61 ASMV("mov %%r15, %0" : "=m" (calleeSaveRegs[5])); 63 ASMV("mov %%r15, %0" : "=m" (calleeSaveRegs[5]));
62 ASMV("mov %%rsp, %0" : "=r" (stackTop) :); /* stackTop = rsp */ 64
63 65 return StackScanInner(ss, stackBot, calleeSaveRegs, NELEMS(calleeSaveRegs));
64 AVER(AddrIsAligned((Addr)stackTop, sizeof(Addr))); /* .assume.align */
65 res = TraceScanAreaTagged(ss, stackTop, stackBot);
66
67 return res;
68} 66}
69 67
70 68
diff --git a/mps/code/ssw3i3mv.c b/mps/code/ssw3i3mv.c
new file mode 100755
index 00000000000..e09b53ec620
--- /dev/null
+++ b/mps/code/ssw3i3mv.c
@@ -0,0 +1,35 @@
1/* ssw3mv.c: STACK SCANNING FOR WIN32 WITH MICROSOFT C
2 *
3 * $Id$
4 * Copyright (c) 2001 Ravenbrook Limited. See end of file for license.
5 *
6 * This scans the stack and fixes the registers which may contain roots.
7 * See <design/thread-manager/>.
8 */
9
10#include "mpm.h"
11#include <setjmp.h>
12
13SRCID(ssw3mv, "$Id$");
14
15
16Res StackScan(ScanState ss, Addr *stackBot)
17{
18 jmp_buf jb;
19
20 /* We rely on the fact that Microsoft C's setjmp stores the callee-save
21 registers in the jmp_buf. */
22 (void)setjmp(jb);
23
24 /* These checks will just serve to warn us at compile-time if the
25 setjmp.h header changes to indicate that the registers we want aren't
26 saved any more. */
27 AVER(sizeof(((_JUMP_BUFFER *)jb)->Edi) == sizeof(Addr));
28 AVER(sizeof(((_JUMP_BUFFER *)jb)->Esi) == sizeof(Addr));
29 AVER(sizeof(((_JUMP_BUFFER *)jb)->Ebx) == sizeof(Addr));
30
31 AVER(offsetof(_JUMP_BUFFER, Edi) == offsetof(_JUMP_BUFFER, Ebx) + 4);
32 AVER(offsetof(_JUMP_BUFFER, Esi) == offsetof(_JUMP_BUFFER, Ebx) + 8);
33
34 return StackScanInner(ss, stackBot, (Addr *)&((_JUMP_BUFFER *)jb)->Ebx, 3);
35}
diff --git a/mps/code/ssw3mv.c b/mps/code/ssw3i6mv.c
index 26753b79232..d3e1dd85f46 100755
--- a/mps/code/ssw3mv.c
+++ b/mps/code/ssw3i6mv.c
@@ -16,7 +16,6 @@ SRCID(ssw3mv, "$Id$");
16Res StackScan(ScanState ss, Addr *stackBot) 16Res StackScan(ScanState ss, Addr *stackBot)
17{ 17{
18 jmp_buf jb; 18 jmp_buf jb;
19 Addr *stackTop;
20 19
21 /* We rely on the fact that Microsoft C's setjmp stores the callee-save 20 /* We rely on the fact that Microsoft C's setjmp stores the callee-save
22 registers in the jmp_buf. */ 21 registers in the jmp_buf. */
@@ -25,11 +24,6 @@ Res StackScan(ScanState ss, Addr *stackBot)
25 /* These checks will just serve to warn us at compile-time if the 24 /* These checks will just serve to warn us at compile-time if the
26 setjmp.h header changes to indicate that the registers we want aren't 25 setjmp.h header changes to indicate that the registers we want aren't
27 saved any more. */ 26 saved any more. */
28#if defined(MPS_ARCH_I3)
29 AVER(sizeof(((_JUMP_BUFFER *)jb)->Edi) == sizeof(Addr));
30 AVER(sizeof(((_JUMP_BUFFER *)jb)->Esi) == sizeof(Addr));
31 AVER(sizeof(((_JUMP_BUFFER *)jb)->Ebx) == sizeof(Addr));
32#elif defined(MPS_ARCH_I6)
33 AVER(sizeof(((_JUMP_BUFFER *)jb)->Rdi) == sizeof(Addr)); 27 AVER(sizeof(((_JUMP_BUFFER *)jb)->Rdi) == sizeof(Addr));
34 AVER(sizeof(((_JUMP_BUFFER *)jb)->Rsi) == sizeof(Addr)); 28 AVER(sizeof(((_JUMP_BUFFER *)jb)->Rsi) == sizeof(Addr));
35 AVER(sizeof(((_JUMP_BUFFER *)jb)->Rbp) == sizeof(Addr)); 29 AVER(sizeof(((_JUMP_BUFFER *)jb)->Rbp) == sizeof(Addr));
@@ -37,12 +31,16 @@ Res StackScan(ScanState ss, Addr *stackBot)
37 AVER(sizeof(((_JUMP_BUFFER *)jb)->R13) == sizeof(Addr)); 31 AVER(sizeof(((_JUMP_BUFFER *)jb)->R13) == sizeof(Addr));
38 AVER(sizeof(((_JUMP_BUFFER *)jb)->R14) == sizeof(Addr)); 32 AVER(sizeof(((_JUMP_BUFFER *)jb)->R14) == sizeof(Addr));
39 AVER(sizeof(((_JUMP_BUFFER *)jb)->R15) == sizeof(Addr)); 33 AVER(sizeof(((_JUMP_BUFFER *)jb)->R15) == sizeof(Addr));
40#else
41#error "StackScan not verified for the target architecture"
42#endif
43 34
44 stackTop = (Addr *)jb; 35 /* The layout of the jmp_buf forces us to harmlessly scan Rsp as well. */
45 AVER(AddrIsAligned((Addr)stackTop, sizeof(Addr))); /* .align */ 36 AVER(offsetof(_JUMP_BUFFER, Rsp) == offsetof(_JUMP_BUFFER, Rbx) + 8);
46 37 AVER(offsetof(_JUMP_BUFFER, Rbp) == offsetof(_JUMP_BUFFER, Rbx) + 16);
47 return TraceScanAreaTagged(ss, stackTop, stackBot); 38 AVER(offsetof(_JUMP_BUFFER, Rsi) == offsetof(_JUMP_BUFFER, Rbx) + 24);
39 AVER(offsetof(_JUMP_BUFFER, Rdi) == offsetof(_JUMP_BUFFER, Rbx) + 32);
40 AVER(offsetof(_JUMP_BUFFER, R12) == offsetof(_JUMP_BUFFER, Rbx) + 40);
41 AVER(offsetof(_JUMP_BUFFER, R13) == offsetof(_JUMP_BUFFER, Rbx) + 48);
42 AVER(offsetof(_JUMP_BUFFER, R14) == offsetof(_JUMP_BUFFER, Rbx) + 56);
43 AVER(offsetof(_JUMP_BUFFER, R15) == offsetof(_JUMP_BUFFER, Rbx) + 64);
44
45 return StackScanInner(ss, stackBot, (Addr *)&((_JUMP_BUFFER *)jb)->Rbx, 9);
48} 46}
diff --git a/mps/code/table.c b/mps/code/table.c
index e141bf1d8e2..7e6b1ec7149 100644
--- a/mps/code/table.c
+++ b/mps/code/table.c
@@ -8,60 +8,75 @@
8 */ 8 */
9 9
10#include "table.h" 10#include "table.h"
11#include "mpmtypes.h" 11#include "mpm.h"
12 12
13#include <stddef.h> 13#include <stddef.h>
14#include <stdlib.h>
15#include <assert.h>
16#include <stdio.h>
17#include "mpstd.h"
18 14
19typedef unsigned long ulong;
20 15
16SRCID(table, "$Id$");
21 17
22#define tableUNUSED ((Word)0x2AB7E040)
23#define tableDELETED ((Word)0x2AB7EDE7)
24#define tableACTIVE ((Word)0x2AB7EAC2)
25 18
19/* TableHash -- return a hash value from an address
20 *
21 * This uses a single cycle of an MLCG, more commonly seen as a
22 * pseudorandom number generator. It works extremely well as a
23 * hash function.
24 *
25 * (In particular, it is substantially better than simply doing this:
26 * seed = (unsigned long)addr * 48271;
27 * Tested by RHSK 2010-12-28.)
28 *
29 * This MLCG is a full period generator: it cycles through every
30 * number from 1 to m-1 before repeating. Therefore, no two numbers
31 * in that range hash to the same value. Furthermore, it has prime
32 * modulus, which tends to avoid recurring patterns in the low-order
33 * bits, which is good because the hash will be used modulus the
34 * number of slots in the table.
35 *
36 * Of course it's only a 31-bit cycle, so we start by losing the top
37 * bit of the address, but that's hardly a great problem.
38 *
39 * The implementation is quite subtle. See rnd() in testlib.c, where
40 * it has been exhaustively (ie: totally) tested. RHSK 2010-12-28.
41 *
42 * TODO: Check how this works out on 64-bit. RB 2012-09-04
43 */
44
45#define R_m 2147483647UL
46#define R_a 48271UL
26 47
27typedef struct TableEntryStruct *TableEntry; 48typedef Word Hash;
28typedef struct TableEntryStruct {
29 Word status;
30 Word key;
31 void *value;
32} TableEntryStruct;
33
34
35typedef struct TableStruct {
36 size_t length;
37 size_t count;
38 size_t limit;
39 TableEntry array;
40} TableStruct;
41
42 49
50static Hash TableHash(Word key)
51{
52 Hash seed = (Hash)(key & 0x7FFFFFFF);
53 /* requires m == 2^31-1, a < 2^16 */
54 Hash bot = R_a * (seed & 0x7FFF);
55 Hash top = R_a * (seed >> 15);
56 seed = bot + ((top & 0xFFFF) << 15) + (top >> 16);
57 if(seed > R_m)
58 seed -= R_m;
59 return seed;
60}
43 61
44/* sizeFloorLog2 -- logarithm base 2 */
45 62
46static size_t sizeFloorLog2(size_t size) 63Bool TableCheck(Table table)
47{ 64{
48 size_t l = 0; 65 CHECKS(Table, table);
49 66 CHECKL(table->count <= table->length);
50 assert(size != 0); 67 CHECKL(table->length == 0 || table->array != NULL);
51 while(size > 1) { 68 CHECKL(FUNCHECK(table->alloc));
52 ++l; 69 CHECKL(FUNCHECK(table->free));
53 size >>= 1; 70 /* can't check allocClosure -- it could be anything */
54 } 71 CHECKL(table->unusedKey != table->deletedKey);
55 return l; 72 return TRUE;
56} 73}
57 74
58 75
59/* TableHash -- table hashing function */ 76static Bool entryIsActive(Table table, TableEntry entry)
60
61static ulong TableHash(Word key)
62{ 77{
63 /* Shift some randomness into the low bits. */ 78 return !(entry->key == table->unusedKey ||
64 return (ulong)((key >> 10) + key); 79 entry->key == table->deletedKey);
65} 80}
66 81
67 82
@@ -72,69 +87,126 @@ static ulong TableHash(Word key)
72 * that all the items still fit in after growing the table. 87 * that all the items still fit in after growing the table.
73 */ 88 */
74 89
75static TableEntry TableFind(Table table, Word key, int skip_deleted) 90static TableEntry TableFind(Table table, Word key, Bool skip_deleted)
76{ 91{
77 ulong hash; 92 Hash hash;
78 size_t i, mask = table->length - 1; 93 Index i;
79 94 Word mask;
95
96 /* .find.visit: Ensure the length is a power of two so that the stride
97 is coprime and so visits all entries in the array eventually. */
98 AVER(WordIsP2(table->length)); /* .find.visit */
99
100 mask = table->length - 1;
80 hash = TableHash(key) & mask; 101 hash = TableHash(key) & mask;
81 i = hash; 102 i = hash;
82 do { 103 do {
83 switch (table->array[i].status) { 104 Word k = table->array[i].key;
84 case tableACTIVE: 105 if (k == key ||
85 if (table->array[i].key == key) 106 k == table->unusedKey ||
86 return &table->array[i]; 107 (!skip_deleted && key == table->deletedKey))
87 break;
88 case tableDELETED:
89 if (!skip_deleted)
90 return &table->array[i];
91 break;
92 case tableUNUSED:
93 return &table->array[i]; 108 return &table->array[i];
94 break; 109 i = (i + (hash | 1)) & mask; /* .find.visit */
95 }
96 i = (i + 1) & mask;
97 } while(i != hash); 110 } while(i != hash);
98 111
99 return NULL; 112 return NULL;
100} 113}
101 114
102 115
103/* TableGrow -- doubles the size of the table */ 116/* TableGrow -- increase the capacity of the table
117 *
118 * Ensure the transform's hashtable can accommodate N entries (filled
119 * slots), without becoming cramped. If necessary, resize the
120 * hashtable by allocating a new one and rehashing all old entries.
121 * If insufficient memory, return error without modifying table.
122 *
123 * .hash.spacefraction: As with all closed hash tables, we must choose
124 * an appropriate proportion of slots to remain free. More free slots
125 * help avoid large-sized contiguous clumps of full cells and their
126 * associated linear search costs.
127 *
128 * .hash.initial: Any reasonable number.
129 *
130 * .hash.growth: A compromise between space inefficency (growing bigger
131 * than required) and time inefficiency (growing too slowly, with all
132 * the rehash costs at every step). A factor of 2 means that at the
133 * point of growing to a size X table, hash-work equivalent to filling
134 * a size-X table has already been done. So we do at most 2x the
135 * hash-work we would have done if we had been able to guess the right
136 * table size initially.
137 *
138 * Numbers of slots maintain this relation:
139 * occupancy <= capacity < enough <= cSlots
140 */
141
142#define SPACEFRACTION 0.75 /* .hash.spacefraction */
104 143
105static Res TableGrow(Table table) 144Res TableGrow(Table table, Count extraCapacity)
106{ 145{
107 TableEntry oldArray, newArray; 146 TableEntry oldArray, newArray;
108 size_t i, oldLength, newLength; 147 Count oldLength, newLength;
148 Count required, minimum;
149 Count i, found;
150
151 required = table->count + extraCapacity;
152 if (required < table->count) /* overflow? */
153 return ResLIMIT;
154
155 /* Calculate the minimum table length that would allow for the required
156 capacity without growing again. */
157 minimum = (Count)(required / SPACEFRACTION);
158 if (minimum < required) /* overflow? */
159 return ResLIMIT;
109 160
161 /* Double the table length until it's larger than the minimum */
110 oldLength = table->length; 162 oldLength = table->length;
163 newLength = oldLength;
164 while(newLength < minimum) {
165 Count doubled = newLength > 0 ? newLength * 2 : 1; /* .hash.growth */
166 if (doubled <= newLength) /* overflow? */
167 return ResLIMIT;
168 newLength = doubled;
169 }
170
171 if (newLength == oldLength) /* already enough space? */
172 return ResOK;
173
174 /* TODO: An event would be good here */
175
111 oldArray = table->array; 176 oldArray = table->array;
112 newLength = oldLength * 2; 177 newArray = table->alloc(table->allocClosure,
113 newArray = malloc(sizeof(TableEntryStruct) * newLength); 178 sizeof(TableEntryStruct) * newLength);
114 if(newArray == NULL) return ResMEMORY; 179 if(newArray == NULL)
180 return ResMEMORY;
115 181
116 for(i = 0; i < newLength; ++i) { 182 for(i = 0; i < newLength; ++i) {
117 newArray[i].key = 0; 183 newArray[i].key = table->unusedKey;
118 newArray[i].value = NULL; 184 newArray[i].value = NULL;
119 newArray[i].status = tableUNUSED;
120 } 185 }
121 186
122 table->length = newLength; 187 table->length = newLength;
123 table->array = newArray; 188 table->array = newArray;
124 table->limit *= 2;
125 189
190 found = 0;
126 for(i = 0; i < oldLength; ++i) { 191 for(i = 0; i < oldLength; ++i) {
127 if (oldArray[i].status == tableACTIVE) { 192 if (entryIsActive(table, &oldArray[i])) {
128 TableEntry entry; 193 TableEntry entry;
129 entry = TableFind(table, oldArray[i].key, 0 /* none deleted */); 194 entry = TableFind(table, oldArray[i].key, FALSE /* none deleted */);
130 assert(entry != NULL); 195 AVER(entry != NULL);
131 assert(entry->status == tableUNUSED); 196 AVER(entry->key == table->unusedKey);
132 entry->key = oldArray[i].key; 197 entry->key = oldArray[i].key;
133 entry->value = oldArray[i].value; 198 entry->value = oldArray[i].value;
134 entry->status = tableACTIVE; 199 ++found;
135 } 200 }
136 } 201 }
137 free(oldArray); 202 AVER(found == table->count);
203
204 if (oldLength > 0) {
205 AVER(oldArray != NULL);
206 table->free(table->allocClosure,
207 oldArray,
208 sizeof(TableEntryStruct) * oldLength);
209 }
138 210
139 return ResOK; 211 return ResOK;
140} 212}
@@ -142,35 +214,44 @@ static Res TableGrow(Table table)
142 214
143/* TableCreate -- makes a new table */ 215/* TableCreate -- makes a new table */
144 216
145extern Res TableCreate(Table *tableReturn, size_t length) 217extern Res TableCreate(Table *tableReturn,
218 Count length,
219 TableAllocMethod tableAlloc,
220 TableFreeMethod tableFree,
221 void *allocClosure,
222 Word unusedKey,
223 Word deletedKey)
146{ 224{
147 Table table; 225 Table table;
148 size_t i; 226 Res res;
227
228 AVER(tableReturn != NULL);
229 AVER(FUNCHECK(tableAlloc));
230 AVER(FUNCHECK(tableFree));
231 AVER(unusedKey != deletedKey);
149 232
150 assert(tableReturn != NULL); 233 table = tableAlloc(allocClosure, sizeof(TableStruct));
234 if(table == NULL)
235 return ResMEMORY;
151 236
152 table = malloc(sizeof(TableStruct)); 237 table->length = 0;
153 if(table == NULL) goto failMallocTable;
154 if (length < 2) length = 2;
155 /* Table size is length rounded up to the next power of 2. */
156 table->length = (size_t)1 << (sizeFloorLog2(length-1) + 1);
157 table->count = 0; 238 table->count = 0;
158 table->limit = (size_t)(.5 * length); 239 table->array = NULL;
159 table->array = malloc(sizeof(TableEntryStruct) * length); 240 table->alloc = tableAlloc;
160 if(table->array == NULL) goto failMallocArray; 241 table->free = tableFree;
161 for(i = 0; i < length; ++i) { 242 table->allocClosure = allocClosure;
162 table->array[i].key = 0; 243 table->unusedKey = unusedKey;
163 table->array[i].value = NULL; 244 table->deletedKey = deletedKey;
164 table->array[i].status = tableUNUSED; 245 table->sig = TableSig;
165 } 246
247 AVERT(Table, table);
248
249 res = TableGrow(table, length);
250 if (res != ResOK)
251 return res;
166 252
167 *tableReturn = table; 253 *tableReturn = table;
168 return ResOK; 254 return ResOK;
169
170failMallocArray:
171 free(table);
172failMallocTable:
173 return ResMEMORY;
174} 255}
175 256
176 257
@@ -178,9 +259,15 @@ failMallocTable:
178 259
179extern void TableDestroy(Table table) 260extern void TableDestroy(Table table)
180{ 261{
181 assert(table != NULL); 262 AVER(table != NULL);
182 free(table->array); 263 if (table->length > 0) {
183 free(table); 264 AVER(table->array != NULL);
265 table->free(table->allocClosure,
266 table->array,
267 sizeof(TableEntryStruct) * table->length);
268 }
269 table->sig = SigInvalid;
270 table->free(table->allocClosure, table, sizeof(TableStruct));
184} 271}
185 272
186 273
@@ -188,9 +275,9 @@ extern void TableDestroy(Table table)
188 275
189extern Bool TableLookup(void **valueReturn, Table table, Word key) 276extern Bool TableLookup(void **valueReturn, Table table, Word key)
190{ 277{
191 TableEntry entry = TableFind(table, key, 1 /* skip deleted */); 278 TableEntry entry = TableFind(table, key, TRUE /* skip deleted */);
192 279
193 if(entry == NULL || entry->status != tableACTIVE) 280 if(entry == NULL || !entryIsActive(table, entry))
194 return FALSE; 281 return FALSE;
195 *valueReturn = entry->value; 282 *valueReturn = entry->value;
196 return TRUE; 283 return TRUE;
@@ -202,24 +289,26 @@ extern Bool TableLookup(void **valueReturn, Table table, Word key)
202extern Res TableDefine(Table table, Word key, void *value) 289extern Res TableDefine(Table table, Word key, void *value)
203{ 290{
204 TableEntry entry; 291 TableEntry entry;
292
293 AVER(key != table->unusedKey);
294 AVER(key != table->deletedKey);
205 295
206 if (table->count >= table->limit) { 296 if (table->count >= table->length * SPACEFRACTION) {
207 Res res = TableGrow(table); 297 Res res = TableGrow(table, 1);
208 if(res != ResOK) return res; 298 if(res != ResOK) return res;
209 entry = TableFind(table, key, 0 /* no deletions yet */); 299 entry = TableFind(table, key, FALSE /* no deletions yet */);
210 assert(entry != NULL); 300 AVER(entry != NULL);
211 if (entry->status == tableACTIVE) 301 if (entryIsActive(table, entry))
212 return ResFAIL; 302 return ResFAIL;
213 } else { 303 } else {
214 entry = TableFind(table, key, 1 /* skip deleted */); 304 entry = TableFind(table, key, TRUE /* skip deleted */);
215 if (entry != NULL && entry->status == tableACTIVE) 305 if (entry != NULL && entryIsActive(table, entry))
216 return ResFAIL; 306 return ResFAIL;
217 /* Search again to find the best slot, deletions included. */ 307 /* Search again to find the best slot, deletions included. */
218 entry = TableFind(table, key, 0 /* don't skip deleted */); 308 entry = TableFind(table, key, FALSE /* don't skip deleted */);
219 assert(entry != NULL); 309 AVER(entry != NULL);
220 } 310 }
221 311
222 entry->status = tableACTIVE;
223 entry->key = key; 312 entry->key = key;
224 entry->value = value; 313 entry->value = value;
225 ++table->count; 314 ++table->count;
@@ -232,11 +321,11 @@ extern Res TableDefine(Table table, Word key, void *value)
232 321
233extern Res TableRedefine(Table table, Word key, void *value) 322extern Res TableRedefine(Table table, Word key, void *value)
234{ 323{
235 TableEntry entry = TableFind(table, key, 1 /* skip deletions */); 324 TableEntry entry = TableFind(table, key, TRUE /* skip deletions */);
236 325
237 if (entry == NULL || entry->status != tableACTIVE) 326 if (entry == NULL || !entryIsActive(table, entry))
238 return ResFAIL; 327 return ResFAIL;
239 assert(entry->key == key); 328 AVER(entry->key == key);
240 entry->value = value; 329 entry->value = value;
241 return ResOK; 330 return ResOK;
242} 331}
@@ -246,11 +335,11 @@ extern Res TableRedefine(Table table, Word key, void *value)
246 335
247extern Res TableRemove(Table table, Word key) 336extern Res TableRemove(Table table, Word key)
248{ 337{
249 TableEntry entry = TableFind(table, key, 1); 338 TableEntry entry = TableFind(table, key, TRUE);
250 339
251 if (entry == NULL || entry->status != tableACTIVE) 340 if (entry == NULL || !entryIsActive(table, entry))
252 return ResFAIL; 341 return ResFAIL;
253 entry->status = tableDELETED; 342 entry->key = table->deletedKey;
254 --table->count; 343 --table->count;
255 return ResOK; 344 return ResOK;
256} 345}
@@ -258,18 +347,20 @@ extern Res TableRemove(Table table, Word key)
258 347
259/* TableMap -- apply a function to all the mappings */ 348/* TableMap -- apply a function to all the mappings */
260 349
261extern void TableMap(Table table, void(*fun)(Word key, void*value)) 350extern void TableMap(Table table,
351 void (*fun)(void *closure, Word key, void*value),
352 void *closure)
262{ 353{
263 size_t i; 354 Index i;
264 for (i = 0; i < table->length; i++) 355 for (i = 0; i < table->length; i++)
265 if (table->array[i].status == tableACTIVE) 356 if (entryIsActive(table, &table->array[i]))
266 (*fun)(table->array[i].key, table->array[i].value); 357 (*fun)(closure, table->array[i].key, table->array[i].value);
267} 358}
268 359
269 360
270/* TableCount -- count the number of mappings in the table */ 361/* TableCount -- count the number of mappings in the table */
271 362
272extern size_t TableCount(Table table) 363extern Count TableCount(Table table)
273{ 364{
274 return table->count; 365 return table->count;
275} 366}
diff --git a/mps/code/table.h b/mps/code/table.h
index 1b9840a7b25..1697be09b9b 100644
--- a/mps/code/table.h
+++ b/mps/code/table.h
@@ -13,14 +13,46 @@
13 13
14typedef struct TableStruct *Table; 14typedef struct TableStruct *Table;
15 15
16extern Res TableCreate(Table *tableReturn, size_t length); 16#define TableSig ((Sig)0x5192AB13) /* SIGnature TABLE */
17
18typedef void *(*TableAllocMethod)(void *closure, size_t size);
19typedef void (*TableFreeMethod)(void *closure, void *p, size_t size);
20
21typedef struct TableEntryStruct {
22 Word key;
23 void *value;
24} TableEntryStruct, *TableEntry;
25
26typedef struct TableStruct {
27 Sig sig; /* <design/sig/> */
28 Count length; /* Number of slots in the array */
29 Count count; /* Active entries in the table */
30 TableEntry array; /* Array of table slots */
31 TableAllocMethod alloc;
32 TableFreeMethod free;
33 void *allocClosure;
34 Word unusedKey; /* key marking unused (undefined) entries */
35 Word deletedKey; /* key marking deleted entries */
36} TableStruct;
37
38extern Res TableCreate(Table *tableReturn,
39 Count length,
40 TableAllocMethod tableAlloc,
41 TableFreeMethod tableFree,
42 void *allocClosure,
43 Word unusedKey,
44 Word deletedKey);
17extern void TableDestroy(Table table); 45extern void TableDestroy(Table table);
46extern Bool TableCheck(Table table);
18extern Res TableDefine(Table table, Word key, void *value); 47extern Res TableDefine(Table table, Word key, void *value);
19extern Res TableRedefine(Table table, Word key, void *value); 48extern Res TableRedefine(Table table, Word key, void *value);
20extern Bool TableLookup(void **valueReturn, Table table, Word key); 49extern Bool TableLookup(void **valueReturn, Table table, Word key);
21extern Res TableRemove(Table table, Word key); 50extern Res TableRemove(Table table, Word key);
22extern size_t TableCount(Table table); 51extern Count TableCount(Table table);
23extern void TableMap(Table table, void(*fun)(Word key, void *value)); 52extern void TableMap(Table table,
53 void(*fun)(void *closure, Word key, void *value),
54 void *closure);
55extern Res TableGrow(Table table, Count extraCapacity);
24 56
25 57
26#endif /* table_h */ 58#endif /* table_h */
diff --git a/mps/code/testlib.c b/mps/code/testlib.c
index d7f4bc60418..06d0641c688 100644
--- a/mps/code/testlib.c
+++ b/mps/code/testlib.c
@@ -32,6 +32,15 @@ struct itimerspec; /* stop complaints from time.h */
32#endif 32#endif
33 33
34 34
35/* fail -- like assert, but (notionally) returns a value, so usable in an expression */
36
37int fail(void)
38{
39 Insist(FALSE);
40 return 1111UL;
41}
42
43
35/* rnd -- a random number generator 44/* rnd -- a random number generator
36 * 45 *
37 * We use the (Multiplicative) Linear Congruential Generator 46 * We use the (Multiplicative) Linear Congruential Generator
diff --git a/mps/code/testlib.h b/mps/code/testlib.h
index 3d187080fc2..d16f4fb6eef 100644
--- a/mps/code/testlib.h
+++ b/mps/code/testlib.h
@@ -88,6 +88,7 @@
88#define PRIXLONGEST "llX" 88#define PRIXLONGEST "llX"
89#define PRIwWORD "16" 89#define PRIwWORD "16"
90typedef unsigned long long ulongest_t; 90typedef unsigned long long ulongest_t;
91typedef long long longest_t;
91#define MPS_WORD_CONST(n) (n##ull) 92#define MPS_WORD_CONST(n) (n##ull)
92#else 93#else
93#define PRIuLONGEST "lu" 94#define PRIuLONGEST "lu"
@@ -95,6 +96,7 @@ typedef unsigned long long ulongest_t;
95#define PRIXLONGEST "lX" 96#define PRIXLONGEST "lX"
96#define PRIwWORD "8" 97#define PRIwWORD "8"
97typedef unsigned long ulongest_t; 98typedef unsigned long ulongest_t;
99typedef long longest_t;
98#define MPS_WORD_CONST(n) (n##ul) 100#define MPS_WORD_CONST(n) (n##ul)
99#endif 101#endif
100#define PRIXPTR "0"PRIwWORD PRIXLONGEST 102#define PRIXPTR "0"PRIwWORD PRIXLONGEST
@@ -164,6 +166,11 @@ extern void verror(const char *format, va_list args);
164 cdie(cond, condstring "\n" __FILE__ "\n" STR(__LINE__)) 166 cdie(cond, condstring "\n" __FILE__ "\n" STR(__LINE__))
165 167
166 168
169/* fail -- like assert, but (notionally) returns a value, so usable in an expression */
170
171extern int fail(void);
172
173
167/* rnd -- random number generator 174/* rnd -- random number generator
168 * 175 *
169 * rnd() generates a sequence of integers in the range [1, 2^31-2]. 176 * rnd() generates a sequence of integers in the range [1, 2^31-2].
diff --git a/mps/code/trace.c b/mps/code/trace.c
index 8ff9bf8bdf1..9771797337d 100644
--- a/mps/code/trace.c
+++ b/mps/code/trace.c
@@ -39,6 +39,7 @@ Bool ScanStateCheck(ScanState ss)
39 39
40 CHECKS(ScanState, ss); 40 CHECKS(ScanState, ss);
41 CHECKL(FUNCHECK(ss->fix)); 41 CHECKL(FUNCHECK(ss->fix));
42 /* Can't check ss->fixClosure. */
42 CHECKL(ss->zoneShift == ss->arena->zoneShift); 43 CHECKL(ss->zoneShift == ss->arena->zoneShift);
43 white = ZoneSetEMPTY; 44 white = ZoneSetEMPTY;
44 TRACE_SET_ITER(ti, trace, ss->traces, ss->arena) 45 TRACE_SET_ITER(ti, trace, ss->traces, ss->arena)
@@ -69,12 +70,28 @@ void ScanStateInit(ScanState ss, TraceSet ts, Arena arena,
69 AVER(RankCheck(rank)); 70 AVER(RankCheck(rank));
70 /* white is arbitrary and can't be checked */ 71 /* white is arbitrary and can't be checked */
71 72
72 ss->fix = TraceFix; 73 /* NOTE: We can only currently support scanning for a set of traces with
73 TRACE_SET_ITER(ti, trace, ts, arena) 74 the same fix method and closure. To remove this restriction,
74 if(trace->emergency) { 75 it would be necessary to dispatch to the fix methods of sets of traces
75 ss->fix = TraceFixEmergency; 76 in TraceFix. */
77 ss->fix = NULL;
78 ss->fixClosure = NULL;
79 TRACE_SET_ITER(ti, trace, ts, arena) {
80 if (ss->fix == NULL) {
81 ss->fix = trace->fix;
82 ss->fixClosure = trace->fixClosure;
83 } else {
84 AVER(ss->fix == trace->fix);
85 AVER(ss->fixClosure == trace->fixClosure);
76 } 86 }
77 TRACE_SET_ITER_END(ti, trace, ts, arena); 87 } TRACE_SET_ITER_END(ti, trace, ts, arena);
88 AVER(ss->fix != NULL);
89
90 /* If the fix method is the normal GC fix, then we optimise the test for
91 whether it's an emergency or not by updating the dispatch here, once. */
92 if (ss->fix == PoolFix && ArenaEmergency(arena))
93 ss->fix = PoolFixEmergency;
94
78 ss->rank = rank; 95 ss->rank = rank;
79 ss->traces = ts; 96 ss->traces = ts;
80 ss->zoneShift = arena->zoneShift; 97 ss->zoneShift = arena->zoneShift;
@@ -174,10 +191,11 @@ Bool TraceCheck(Trace trace)
174 if(trace->state == TraceFLIPPED) { 191 if(trace->state == TraceFLIPPED) {
175 CHECKL(RankCheck(trace->band)); 192 CHECKL(RankCheck(trace->band));
176 } 193 }
177 CHECKL(BoolCheck(trace->emergency));
178 if(trace->chain != NULL) { 194 if(trace->chain != NULL) {
179 CHECKU(Chain, trace->chain); 195 CHECKU(Chain, trace->chain);
180 } 196 }
197 CHECKL(FUNCHECK(trace->fix));
198 /* Can't check trace->fixClosure. */
181 199
182 /* @@@@ checks for counts missing */ 200 /* @@@@ checks for counts missing */
183 201
@@ -303,24 +321,6 @@ static void traceSetUpdateCounts(TraceSet ts, Arena arena, ScanState ss,
303} 321}
304 322
305 323
306/* traceSetSignalEmergency -- move a set of traces into emergency mode. */
307
308static void traceSetSignalEmergency(TraceSet ts, Arena arena)
309{
310 TraceId ti;
311 Trace trace;
312
313 DIAG_SINGLEF(( "traceSetSignalEmergency",
314 "traceSet: $B", (WriteFB)ts, NULL ));
315
316 TRACE_SET_ITER(ti, trace, ts, arena)
317 trace->emergency = TRUE;
318 TRACE_SET_ITER_END(ti, trace, ts, arena);
319
320 return;
321}
322
323
324/* traceSetWhiteUnion 324/* traceSetWhiteUnion
325 * 325 *
326 * Returns a ZoneSet describing the union of the white sets of all the 326 * Returns a ZoneSet describing the union of the white sets of all the
@@ -360,7 +360,7 @@ Res TraceAddWhite(Trace trace, Seg seg)
360 if(res != ResOK) 360 if(res != ResOK)
361 return res; 361 return res;
362 362
363 /* Add the segment to the approximation of the white set the */ 363 /* Add the segment to the approximation of the white set if the */
364 /* pool made it white. */ 364 /* pool made it white. */
365 if(TraceSetIsMember(SegWhite(seg), trace)) { 365 if(TraceSetIsMember(SegWhite(seg), trace)) {
366 trace->white = ZoneSetUnion(trace->white, ZoneSetOfSeg(trace->arena, seg)); 366 trace->white = ZoneSetUnion(trace->white, ZoneSetOfSeg(trace->arena, seg));
@@ -474,23 +474,23 @@ static Res traceScanRootRes(TraceSet ts, Rank rank, Arena arena, Root root)
474 474
475/* traceScanRoot 475/* traceScanRoot
476 * 476 *
477 * Scan a root without fail. The traces may enter emergency mode to 477 * Scan a root, entering emergency mode on allocation failure.
478 * ensure this. */ 478 */
479 479
480static void traceScanRoot(TraceSet ts, Rank rank, Arena arena, Root root) 480static Res traceScanRoot(TraceSet ts, Rank rank, Arena arena, Root root)
481{ 481{
482 Res res; 482 Res res;
483 483
484 res = traceScanRootRes(ts, rank, arena, root); 484 res = traceScanRootRes(ts, rank, arena, root);
485 if(res != ResOK) { 485
486 AVER(ResIsAllocFailure(res)); 486 if (ResIsAllocFailure(res)) {
487 traceSetSignalEmergency(ts, arena); 487 ArenaSetEmergency(arena, TRUE);
488 res = traceScanRootRes(ts, rank, arena, root); 488 res = traceScanRootRes(ts, rank, arena, root);
489 /* Should be OK in emergency mode */ 489 /* Should be OK in emergency mode */
490 AVER(!ResIsAllocFailure(res));
490 } 491 }
491 AVER(ResOK == res);
492 492
493 return; 493 return res;
494} 494}
495 495
496 496
@@ -505,6 +505,7 @@ struct rootFlipClosureStruct {
505static Res rootFlip(Root root, void *p) 505static Res rootFlip(Root root, void *p)
506{ 506{
507 struct rootFlipClosureStruct *rf = (struct rootFlipClosureStruct *)p; 507 struct rootFlipClosureStruct *rf = (struct rootFlipClosureStruct *)p;
508 Res res;
508 509
509 AVERT(Root, root); 510 AVERT(Root, root);
510 AVER(p != NULL); 511 AVER(p != NULL);
@@ -514,18 +515,47 @@ static Res rootFlip(Root root, void *p)
514 515
515 AVER(RootRank(root) <= RankEXACT); /* see .root.rank */ 516 AVER(RootRank(root) <= RankEXACT); /* see .root.rank */
516 517
517 if(RootRank(root) == rf->rank) 518 if(RootRank(root) == rf->rank) {
518 traceScanRoot(rf->ts, rf->rank, rf->arena, root); 519 res = traceScanRoot(rf->ts, rf->rank, rf->arena, root);
520 if (res != ResOK)
521 return res;
522 }
519 523
520 return ResOK; 524 return ResOK;
521} 525}
522 526
523static void traceFlip(Trace trace) 527
528/* traceFlip -- flip the mutator from grey to black w.r.t. a trace
529 *
530 * The main job of traceFlip is to scan references which can't be protected
531 * from the mutator, changing the colour of the mutator from grey to black
532 * with respect to a trace. The mutator threads are suspended while this
 533 is happening, and the mutator perceives an instantaneous change in all
534 * the references, enforced by the shield (barrier) system.
535 *
536 * NOTE: We don't have a way to shield the roots, so they are all scanned
537 * here. This is a coincidence. There is no particular reason that the
538 * roots have to be scanned at flip time. (The thread registers are unlikely
539 * ever to be protectable on stock hardware, however.)
540 *
541 * NOTE: Ambiguous references may only exist in roots, because we can't
542 * shield the exact roots and defer them for later scanning (after ambiguous
543 * heap references).
544 *
545 * NOTE: We don't support weak or final roots because we can't shield them
546 * and defer scanning until later. See above.
547 *
548 * If roots and segments were more similar, we could melt a lot of these
549 * problems.
550 */
551
552static Res traceFlip(Trace trace)
524{ 553{
525 Ring node, nextNode; 554 Ring node, nextNode;
526 Arena arena; 555 Arena arena;
527 Rank rank; 556 Rank rank;
528 struct rootFlipClosureStruct rfc; 557 struct rootFlipClosureStruct rfc;
558 Res res;
529 559
530 AVERT(Trace, trace); 560 AVERT(Trace, trace);
531 rfc.ts = TraceSetSingle(trace); 561 rfc.ts = TraceSetSingle(trace);
@@ -555,11 +585,10 @@ static void traceFlip(Trace trace)
555 /* higher ranking roots than data in pools. */ 585 /* higher ranking roots than data in pools. */
556 586
557 for(rank = RankAMBIG; rank <= RankEXACT; ++rank) { 587 for(rank = RankAMBIG; rank <= RankEXACT; ++rank) {
558 Res res;
559
560 rfc.rank = rank; 588 rfc.rank = rank;
561 res = RootsIterate(ArenaGlobals(arena), rootFlip, (void *)&rfc); 589 res = RootsIterate(ArenaGlobals(arena), rootFlip, (void *)&rfc);
562 AVER(res == ResOK); 590 if (res != ResOK)
591 goto failRootFlip;
563 } 592 }
564 593
565 /* .flip.alloc: Allocation needs to become black now. While we flip */ 594 /* .flip.alloc: Allocation needs to become black now. While we flip */
@@ -595,8 +624,11 @@ static void traceFlip(Trace trace)
595 EVENT2(TraceFlipEnd, trace, arena); 624 EVENT2(TraceFlipEnd, trace, arena);
596 625
597 ShieldResume(arena); 626 ShieldResume(arena);
627 return ResOK;
598 628
599 return; 629failRootFlip:
630 ShieldResume(arena);
631 return res;
600} 632}
601 633
602/* traceCopySizes -- preserve size information for later use 634/* traceCopySizes -- preserve size information for later use
@@ -668,7 +700,8 @@ found:
668 trace->ti = ti; 700 trace->ti = ti;
669 trace->state = TraceINIT; 701 trace->state = TraceINIT;
670 trace->band = RankAMBIG; /* Required to be the earliest rank. */ 702 trace->band = RankAMBIG; /* Required to be the earliest rank. */
671 trace->emergency = FALSE; 703 trace->fix = PoolFix;
704 trace->fixClosure = NULL;
672 trace->chain = NULL; 705 trace->chain = NULL;
673 STATISTIC(trace->preTraceArenaReserved = ArenaReserved(arena)); 706 STATISTIC(trace->preTraceArenaReserved = ArenaReserved(arena));
674 trace->condemned = (Size)0; /* nothing condemned yet */ 707 trace->condemned = (Size)0; /* nothing condemned yet */
@@ -908,7 +941,7 @@ Rank TraceRankForAccess(Arena arena, Seg seg)
908 * 941 *
909 * .check.ambig.not: RankAMBIG segments never appear on the grey ring. 942 * .check.ambig.not: RankAMBIG segments never appear on the grey ring.
910 * The current tracer cannot support ambiguous reference except as 943 * The current tracer cannot support ambiguous reference except as
911 * roots, so it's a buf if we ever find any. This behaviour is not set 944 * roots, so it's a bug if we ever find any. This behaviour is not set
912 * in stone, it's possible to imagine changing the tracer so that we can 945 * in stone, it's possible to imagine changing the tracer so that we can
913 * support ambiguous objects one day. For example, a fully conservative 946 * support ambiguous objects one day. For example, a fully conservative
914 * non-moving mode. 947 * non-moving mode.
@@ -1180,23 +1213,23 @@ static Res traceScanSegRes(TraceSet ts, Rank rank, Arena arena, Seg seg)
1180 1213
1181/* traceScanSeg 1214/* traceScanSeg
1182 * 1215 *
1183 * Scans a segment without fail. May put the traces into emergency mode 1216 * Scans a segment, switching to emergency mode if there is an allocation
1184 * to ensure this. */ 1217 * failure.
1218 */
1185 1219
1186static void traceScanSeg(TraceSet ts, Rank rank, Arena arena, Seg seg) 1220static Res traceScanSeg(TraceSet ts, Rank rank, Arena arena, Seg seg)
1187{ 1221{
1188 Res res; 1222 Res res;
1189 1223
1190 res = traceScanSegRes(ts, rank, arena, seg); 1224 res = traceScanSegRes(ts, rank, arena, seg);
1191 if(res != ResOK) { 1225 if(ResIsAllocFailure(res)) {
1192 AVER(ResIsAllocFailure(res)); 1226 ArenaSetEmergency(arena, TRUE);
1193 traceSetSignalEmergency(ts, arena);
1194 res = traceScanSegRes(ts, rank, arena, seg); 1227 res = traceScanSegRes(ts, rank, arena, seg);
1195 /* Should be OK in emergency mode. */ 1228 /* Should be OK in emergency mode. */
1229 AVER(!ResIsAllocFailure(res));
1196 } 1230 }
1197 AVER(ResOK == res);
1198 1231
1199 return; 1232 return res;
1200} 1233}
1201 1234
1202 1235
@@ -1204,6 +1237,8 @@ static void traceScanSeg(TraceSet ts, Rank rank, Arena arena, Seg seg)
1204 1237
1205void TraceSegAccess(Arena arena, Seg seg, AccessSet mode) 1238void TraceSegAccess(Arena arena, Seg seg, AccessSet mode)
1206{ 1239{
1240 Res res;
1241
1207 AVERT(Arena, arena); 1242 AVERT(Arena, arena);
1208 AVERT(Seg, seg); 1243 AVERT(Seg, seg);
1209 1244
@@ -1226,9 +1261,13 @@ void TraceSegAccess(Arena arena, Seg seg, AccessSet mode)
1226 1261
1227 /* Pick set of traces to scan for: */ 1262 /* Pick set of traces to scan for: */
1228 TraceSet traces = arena->flippedTraces; 1263 TraceSet traces = arena->flippedTraces;
1229
1230 rank = TraceRankForAccess(arena, seg); 1264 rank = TraceRankForAccess(arena, seg);
1231 traceScanSeg(traces, rank, arena, seg); 1265 res = traceScanSeg(traces, rank, arena, seg);
1266
 1267 /* Allocation failures should be handled by emergency mode, and we don't
1268 expect any other kind of failure in a normal GC that causes access
1269 faults. */
1270 AVER(res == ResOK);
1232 1271
1233 /* The pool should've done the job of removing the greyness that */ 1272 /* The pool should've done the job of removing the greyness that */
1234 /* was causing the segment to be protected, so that the mutator */ 1273 /* was causing the segment to be protected, so that the mutator */
@@ -1254,20 +1293,31 @@ void TraceSegAccess(Arena arena, Seg seg, AccessSet mode)
1254} 1293}
1255 1294
1256 1295
1257/* TraceFix -- fix a reference */ 1296/* TraceFix2 -- second stage of fixing a reference
1297 *
1298 * TraceFix is on the critical path. A one-instruction difference in the
1299 * early parts of this code will have a significant impact on overall run
1300 * time. The priority is to eliminate irrelevant references early and fast
1301 * using the colour information stored in the tract table.
1302 */
1258 1303
1259Res TraceFix(ScanState ss, Ref *refIO) 1304static Res TraceFix2(ScanState ss, Ref *refIO)
1260{ 1305{
1261 Ref ref; 1306 Ref ref;
1262 Tract tract; 1307 Tract tract;
1263 Pool pool;
1264 1308
1309 /* Special AVER macros are used on the critical path. */
1265 /* See <design/trace/#fix.noaver> */ 1310 /* See <design/trace/#fix.noaver> */
1266 AVERT_CRITICAL(ScanState, ss); 1311 AVERT_CRITICAL(ScanState, ss);
1267 AVER_CRITICAL(refIO != NULL); 1312 AVER_CRITICAL(refIO != NULL);
1268 1313
1269 ref = *refIO; 1314 ref = *refIO;
1270 1315
1316 /* The zone test should already have been passed by MPS_FIX1 in mps.h. */
1317 AVER_CRITICAL(ZoneSetInter(ss->white,
1318 ZoneSetAdd(ss->arena, ZoneSetEMPTY, ref)) !=
1319 ZoneSetEMPTY);
1320
1271 STATISTIC(++ss->fixRefCount); 1321 STATISTIC(++ss->fixRefCount);
1272 EVENT4(TraceFix, ss, refIO, ref, ss->rank); 1322 EVENT4(TraceFix, ss, refIO, ref, ss->rank);
1273 1323
@@ -1277,17 +1327,18 @@ Res TraceFix(ScanState ss, Ref *refIO)
1277 Seg seg; 1327 Seg seg;
1278 if(TRACT_SEG(&seg, tract)) { 1328 if(TRACT_SEG(&seg, tract)) {
1279 Res res; 1329 Res res;
1330 Pool pool;
1280 STATISTIC(++ss->segRefCount); 1331 STATISTIC(++ss->segRefCount);
1281 STATISTIC(++ss->whiteSegRefCount); 1332 STATISTIC(++ss->whiteSegRefCount);
1282 EVENT1(TraceFixSeg, seg); 1333 EVENT1(TraceFixSeg, seg);
1283 EVENT0(TraceFixWhite); 1334 EVENT0(TraceFixWhite);
1284 pool = TractPool(tract); 1335 pool = TractPool(tract);
1285 /* Could move the rank switch here from the class-specific */ 1336 res = (*ss->fix)(pool, ss, seg, refIO);
1286 /* fix methods. */
1287 res = PoolFix(pool, ss, seg, refIO);
1288 if(res != ResOK) { 1337 if(res != ResOK) {
1289 /* Fix protocol (de facto): if Fix fails, ref must be unchanged */ 1338 /* PoolFixEmergency should never fail. */
1290 /* Justification for this restriction: 1339 AVER_CRITICAL(ss->fix != PoolFixEmergency);
1340 /* Fix protocol (de facto): if Fix fails, ref must be unchanged
1341 * Justification for this restriction:
1291 * A: it simplifies; 1342 * A: it simplifies;
1292 * B: it's reasonable (given what may cause Fix to fail); 1343 * B: it's reasonable (given what may cause Fix to fail);
1293 * C: the code (here) already assumes this: it returns without 1344 * C: the code (here) already assumes this: it returns without
@@ -1296,6 +1347,14 @@ Res TraceFix(ScanState ss, Ref *refIO)
1296 AVER(*refIO == ref); 1347 AVER(*refIO == ref);
1297 return res; 1348 return res;
1298 } 1349 }
1350 } else {
1351 /* Only tracts with segments ought to have been condemned. */
1352 /* SegOfAddr FALSE => a ref into a non-seg Tract (poolmv etc) */
1353 /* .notwhite: ...But it should NOT be white.
1354 * [I assert this both from logic, and from inspection of the
1355 * current condemn code. RHSK 2010-11-30]
1356 */
1357 NOTREACHED;
1299 } 1358 }
1300 } else { 1359 } else {
1301 /* Tract isn't white. Don't compute seg for non-statistical */ 1360 /* Tract isn't white. Don't compute seg for non-statistical */
@@ -1322,56 +1381,18 @@ Res TraceFix(ScanState ss, Ref *refIO)
1322} 1381}
1323 1382
1324 1383
1325/* TraceFixEmergency -- fix a reference in emergency mode */ 1384/* mps_fix2 -- external interface to TraceFix
1385 *
1386 * We rely on compiler inlining to make this equivalent to TraceFix, because
1387 * the name "TraceFix" is pervasive in the MPS. That's also why this
1388 * function is in trace.c and not mpsi.c.
1389 */
1326 1390
1327Res TraceFixEmergency(ScanState ss, Ref *refIO) 1391mps_res_t mps_fix2(mps_ss_t mps_ss, mps_addr_t *mps_ref_io)
1328{ 1392{
1329 Ref ref; 1393 ScanState ss = (ScanState)mps_ss;
1330 Tract tract; 1394 Ref *refIO = (Ref *)mps_ref_io;
1331 Pool pool; 1395 return TraceFix2(ss, refIO);
1332
1333 AVERT(ScanState, ss);
1334 AVER(refIO != NULL);
1335
1336 ref = *refIO;
1337
1338 STATISTIC(++ss->fixRefCount);
1339 EVENT4(TraceFix, ss, refIO, ref, ss->rank);
1340
1341 TRACT_OF_ADDR(&tract, ss->arena, ref);
1342 if(tract) {
1343 if(TraceSetInter(TractWhite(tract), ss->traces) != TraceSetEMPTY) {
1344 Seg seg;
1345 if(TRACT_SEG(&seg, tract)) {
1346 STATISTIC(++ss->segRefCount);
1347 STATISTIC(++ss->whiteSegRefCount);
1348 EVENT1(TraceFixSeg, seg);
1349 EVENT0(TraceFixWhite);
1350 pool = TractPool(tract);
1351 PoolFixEmergency(pool, ss, seg, refIO);
1352 }
1353 } else {
1354 /* Tract isn't white. Don't compute seg for non-statistical */
1355 /* variety. See <design/trace/#fix.tractofaddr> */
1356 STATISTIC_STAT
1357 ({
1358 Seg seg;
1359 if(TRACT_SEG(&seg, tract)) {
1360 ++ss->segRefCount;
1361 EVENT1(TraceFixSeg, seg);
1362 }
1363 });
1364 }
1365 } else {
1366 /* See <design/trace/#exact.legal> */
1367 AVER(ss->rank < RankEXACT ||
1368 !ArenaIsReservedAddr(ss->arena, ref));
1369 }
1370
1371 /* See <design/trace/#fix.fixed.all> */
1372 ss->fixedSummary = RefSetAdd(ss->arena, ss->fixedSummary, *refIO);
1373
1374 return ResOK;
1375} 1396}
1376 1397
1377 1398
@@ -1430,7 +1451,7 @@ void TraceScanSingleRef(TraceSet ts, Rank rank, Arena arena,
1430 1451
1431 res = traceScanSingleRefRes(ts, rank, arena, seg, refIO); 1452 res = traceScanSingleRefRes(ts, rank, arena, seg, refIO);
1432 if(res != ResOK) { 1453 if(res != ResOK) {
1433 traceSetSignalEmergency(ts, arena); 1454 ArenaSetEmergency(arena, TRUE);
1434 res = traceScanSingleRefRes(ts, rank, arena, seg, refIO); 1455 res = traceScanSingleRefRes(ts, rank, arena, seg, refIO);
1435 /* Ought to be OK in emergency mode now. */ 1456 /* Ought to be OK in emergency mode now. */
1436 } 1457 }
@@ -1652,7 +1673,18 @@ static void TraceStartGenDesc_diag(GenDesc desc, Bool top, Index i)
1652 } 1673 }
1653} 1674}
1654 1675
1655void TraceStart(Trace trace, double mortality, double finishingTime) 1676
1677/* TraceStart -- start a trace whose white set has been established
1678 *
1679 * The main job of TraceStart is to set up the grey list for a trace. The
1680 * trace is first created with TraceCreate, objects are whitened, then
1681 * TraceStart is called to initialise the tracing process.
1682 *
1683 * NOTE: At present, TraceStart also flips the mutator, so there is no
1684 * grey-mutator tracing.
1685 */
1686
1687Res TraceStart(Trace trace, double mortality, double finishingTime)
1656{ 1688{
1657 Arena arena; 1689 Arena arena;
1658 Res res; 1690 Res res;
@@ -1783,9 +1815,7 @@ void TraceStart(Trace trace, double mortality, double finishingTime)
1783 TracePostStartMessage(trace); 1815 TracePostStartMessage(trace);
1784 1816
1785 /* All traces must flip at beginning at the moment. */ 1817 /* All traces must flip at beginning at the moment. */
1786 traceFlip(trace); 1818 return traceFlip(trace);
1787
1788 return;
1789} 1819}
1790 1820
1791 1821
@@ -1801,6 +1831,7 @@ void TraceStart(Trace trace, double mortality, double finishingTime)
1801void TraceQuantum(Trace trace) 1831void TraceQuantum(Trace trace)
1802{ 1832{
1803 Size pollEnd; 1833 Size pollEnd;
1834 Arena arena = trace->arena;
1804 1835
1805 pollEnd = traceWorkClock(trace) + trace->rate; 1836 pollEnd = traceWorkClock(trace) + trace->rate;
1806 do { 1837 do {
@@ -1810,13 +1841,16 @@ void TraceQuantum(Trace trace)
1810 NOTREACHED; 1841 NOTREACHED;
1811 break; 1842 break;
1812 case TraceFLIPPED: { 1843 case TraceFLIPPED: {
1813 Arena arena = trace->arena;
1814 Seg seg; 1844 Seg seg;
1815 Rank rank; 1845 Rank rank;
1816 1846
1817 if(traceFindGrey(&seg, &rank, arena, trace->ti)) { 1847 if(traceFindGrey(&seg, &rank, arena, trace->ti)) {
1848 Res res;
1818 AVER((SegPool(seg)->class->attr & AttrSCAN) != 0); 1849 AVER((SegPool(seg)->class->attr & AttrSCAN) != 0);
1819 traceScanSeg(TraceSetSingle(trace), rank, arena, seg); 1850 res = traceScanSeg(TraceSetSingle(trace), rank, arena, seg);
1851 /* Allocation failures should be handled by emergency mode, and we
1852 don't expect any other error in a normal GC trace. */
1853 AVER(res == ResOK);
1820 } else { 1854 } else {
1821 trace->state = TraceRECLAIM; 1855 trace->state = TraceRECLAIM;
1822 } 1856 }
@@ -1830,7 +1864,7 @@ void TraceQuantum(Trace trace)
1830 break; 1864 break;
1831 } 1865 }
1832 } while(trace->state != TraceFINISHED 1866 } while(trace->state != TraceFINISHED
1833 && (trace->emergency || traceWorkClock(trace) < pollEnd)); 1867 && (ArenaEmergency(arena) || traceWorkClock(trace) < pollEnd));
1834} 1868}
1835 1869
1836/* TraceStartCollectAll: start a trace which condemns everything in 1870/* TraceStartCollectAll: start a trace which condemns everything in
@@ -1859,12 +1893,25 @@ Res TraceStartCollectAll(Trace *traceReturn, Arena arena, int why)
1859 /* Run out of time, should really try a smaller collection. @@@@ */ 1893 /* Run out of time, should really try a smaller collection. @@@@ */
1860 finishingTime = 0.0; 1894 finishingTime = 0.0;
1861 } 1895 }
1862 TraceStart(trace, TraceTopGenMortality, finishingTime); 1896 res = TraceStart(trace, TraceTopGenMortality, finishingTime);
1897 if (res != ResOK)
1898 goto failStart;
1863 *traceReturn = trace; 1899 *traceReturn = trace;
1864 return ResOK; 1900 return ResOK;
1865 1901
1902failStart:
 1903 /* TODO: We can't back out from a failed TraceStart that has
1904 already done some scanning, so this error path is somewhat bogus if it
1905 destroys the trace. In the current system, TraceStartCollectAll is
1906 only used for a normal GC, so TraceStart should not fail and this case
1907 should never be reached. There's a chance the mutator will survive
1908 if the assertion isn't hit, so drop through anyway. */
1909 NOTREACHED;
1866failCondemn: 1910failCondemn:
1867 TraceDestroy(trace); 1911 TraceDestroy(trace);
1912 /* We don't know how long it'll be before another collection. Make sure
1913 the next one starts in normal mode. */
1914 ArenaSetEmergency(arena, FALSE);
1868 return res; 1915 return res;
1869} 1916}
1870 1917
@@ -1933,7 +1980,9 @@ Size TracePoll(Globals globals)
1933 goto failCondemn; 1980 goto failCondemn;
1934 trace->chain = firstChain; 1981 trace->chain = firstChain;
1935 ChainStartGC(firstChain, trace); 1982 ChainStartGC(firstChain, trace);
1936 TraceStart(trace, mortality, trace->condemned * TraceWorkFactor); 1983 res = TraceStart(trace, mortality, trace->condemned * TraceWorkFactor);
1984 /* We don't expect normal GC traces to fail to start. */
1985 AVER(res == ResOK);
1937 scannedSize = traceWorkClock(trace); 1986 scannedSize = traceWorkClock(trace);
1938 } 1987 }
1939 } /* (dynamicDeferral > 0.0) */ 1988 } /* (dynamicDeferral > 0.0) */
@@ -1950,12 +1999,18 @@ Size TracePoll(Globals globals)
1950 scannedSize = traceWorkClock(trace) - oldScanned; 1999 scannedSize = traceWorkClock(trace) - oldScanned;
1951 if(trace->state == TraceFINISHED) { 2000 if(trace->state == TraceFINISHED) {
1952 TraceDestroy(trace); 2001 TraceDestroy(trace);
2002 /* A trace finished, and hopefully reclaimed some memory, so clear any
2003 emergency. */
2004 ArenaSetEmergency(arena, FALSE);
1953 } 2005 }
1954 } 2006 }
1955 return scannedSize; 2007 return scannedSize;
1956 2008
1957failCondemn: 2009failCondemn:
1958 TraceDestroy(trace); 2010 TraceDestroy(trace);
2011 /* This is an unlikely case, but clear the emergency flag so the next attempt
2012 starts normally. */
2013 ArenaSetEmergency(arena, FALSE);
1959failStart: 2014failStart:
1960 return (Size)0; 2015 return (Size)0;
1961} 2016}
diff --git a/mps/code/traceanc.c b/mps/code/traceanc.c
index 04d7528c24c..24da7a92741 100644
--- a/mps/code/traceanc.c
+++ b/mps/code/traceanc.c
@@ -167,6 +167,9 @@ const char *TraceStartWhyToString(int why)
167 case TraceStartWhyWALK: 167 case TraceStartWhyWALK:
168 r = "Walking all live objects."; 168 r = "Walking all live objects.";
169 break; 169 break;
170 case TraceStartWhyEXTENSION:
171 r = "Extension: an MPS extension started the trace.";
172 break;
170 default: 173 default:
171 NOTREACHED; 174 NOTREACHED;
172 r = "Unknown reason (internal error)."; 175 r = "Unknown reason (internal error).";
@@ -590,6 +593,10 @@ void ArenaPark(Globals globals)
590 } 593 }
591 TRACE_SET_ITER_END(ti, trace, arena->busyTraces, arena); 594 TRACE_SET_ITER_END(ti, trace, arena->busyTraces, arena);
592 } 595 }
596
597 /* Clear any emergency flag so that the next collection starts normally.
598 Any traces that have been finished may have reclaimed memory. */
599 ArenaSetEmergency(arena, FALSE);
593} 600}
594 601
595/* ArenaStartCollect -- start a collection of everything in the 602/* ArenaStartCollect -- start a collection of everything in the
diff --git a/mps/code/w3build.bat b/mps/code/w3build.bat
index a7bbf226df0..ae84606bff6 100755
--- a/mps/code/w3build.bat
+++ b/mps/code/w3build.bat
@@ -20,6 +20,7 @@ mkdir %mpsreleasename%
20 20
21mkdir %mpsreleasename%\include 21mkdir %mpsreleasename%\include
22copy mps.h %mpsreleasename%\include 22copy mps.h %mpsreleasename%\include
23copy mpstr.h %mpsreleasename%\include
23copy mpsavm.h %mpsreleasename%\include 24copy mpsavm.h %mpsreleasename%\include
24copy mpsacl.h %mpsreleasename%\include 25copy mpsacl.h %mpsreleasename%\include
25copy mpscamc.h %mpsreleasename%\include 26copy mpscamc.h %mpsreleasename%\include
diff --git a/mps/code/w3i3mv.nmk b/mps/code/w3i3mv.nmk
index bc05f037b35..37d440235e8 100644
--- a/mps/code/w3i3mv.nmk
+++ b/mps/code/w3i3mv.nmk
@@ -16,10 +16,10 @@ MPM = <ring> <mpm> <bt> <protocol> <boot> \
16 <pool> <poolabs> <poolmfs> <poolmv> \ 16 <pool> <poolabs> <poolmfs> <poolmv> \
17 <root> <format> <buffer> <walk> <lockw3> \ 17 <root> <format> <buffer> <walk> <lockw3> \
18 <ref> <trace> <traceanc> <protw3> <proti3> <prmci3w3> \ 18 <ref> <trace> <traceanc> <protw3> <proti3> <prmci3w3> \
19 <shield> <vmw3> \ 19 <shield> <vmw3> <table> \
20 <thw3> <thw3i3> <ssw3mv> <mpsi> <mpsiw3> <ld> <spi3> \ 20 <thw3> <thw3i3> <ss> <ssw3i3mv> <mpsi> <mpsiw3> <ld> <spi3> \
21 <event> <seg> <sac> <poolmrg> <message> <dbgpool> <dbgpooli> \ 21 <event> <seg> <sac> <poolmrg> <message> <dbgpool> <dbgpooli> \
22 <abq> <meter> <cbs> <poolmv2> <splay> <diag> 22 <abq> <meter> <cbs> <poolmv2> <splay> <diag> <version>
23PLINTH = <mpsliban> <mpsioan> 23PLINTH = <mpsliban> <mpsioan>
24AMC = <poolamc> 24AMC = <poolamc>
25AMS = <poolams> <poolamsi> 25AMS = <poolams> <poolamsi>
diff --git a/mps/code/w3i6mv.nmk b/mps/code/w3i6mv.nmk
index c7b6deb796c..23fc63ef66d 100644
--- a/mps/code/w3i6mv.nmk
+++ b/mps/code/w3i6mv.nmk
@@ -17,8 +17,8 @@ MPM = <ring> <mpm> <bt> <protocol> <boot> \
17 <pool> <poolabs> <poolmfs> <poolmv> \ 17 <pool> <poolabs> <poolmfs> <poolmv> \
18 <root> <format> <buffer> <walk> <lockw3> \ 18 <root> <format> <buffer> <walk> <lockw3> \
19 <ref> <trace> <traceanc> <protw3> <proti6> <prmci6w3> \ 19 <ref> <trace> <traceanc> <protw3> <proti6> <prmci6w3> \
20 <shield> <vmw3> \ 20 <shield> <vmw3> <table> \
21 <thw3> <thw3i6> <ssw3mv> <mpsi> <mpsiw3> <ld> <span> \ 21 <thw3> <thw3i6> <ss> <ssw3i6mv> <mpsi> <mpsiw3> <ld> <span> \
22 <event> <seg> <sac> <poolmrg> <message> <dbgpool> <dbgpooli> \ 22 <event> <seg> <sac> <poolmrg> <message> <dbgpool> <dbgpooli> \
23 <abq> <meter> <cbs> <poolmv2> <splay> <diag> <version> 23 <abq> <meter> <cbs> <poolmv2> <splay> <diag> <version>
24PLINTH = <mpsliban> <mpsioan> 24PLINTH = <mpsliban> <mpsioan>
diff --git a/mps/code/walk.c b/mps/code/walk.c
index bb81a7aa152..9828eb49f57 100644
--- a/mps/code/walk.c
+++ b/mps/code/walk.c
@@ -120,7 +120,7 @@ void mps_arena_formatted_objects_walk(mps_arena_t mps_arena,
120 * scanning them. But there's no direct support for invoking the scanner 120 * scanning them. But there's no direct support for invoking the scanner
121 * without there being a trace, and there's no direct support for 121 * without there being a trace, and there's no direct support for
122 * creating a trace without also condemning part of the heap. (@@@@ This 122 * creating a trace without also condemning part of the heap. (@@@@ This
123 * looks like a useful canditate for inclusion in the future). For now, 123 * looks like a useful candidate for inclusion in the future). For now,
124 * the root walker contains its own code for creating a minimal trace 124 * the root walker contains its own code for creating a minimal trace
125 * and scan state. 125 * and scan state.
126 * 126 *
@@ -185,7 +185,7 @@ static Bool rootsStepClosureCheck(rootsStepClosure rsc)
185 185
186static void rootsStepClosureInit(rootsStepClosure rsc, 186static void rootsStepClosureInit(rootsStepClosure rsc,
187 Globals arena, Trace trace, 187 Globals arena, Trace trace,
188 TraceFixMethod rootFix, 188 PoolFixMethod rootFix,
189 mps_roots_stepper_t f, void *p, size_t s) 189 mps_roots_stepper_t f, void *p, size_t s)
190{ 190{
191 ScanState ss; 191 ScanState ss;
@@ -229,37 +229,26 @@ static void rootsStepClosureFinish(rootsStepClosure rsc)
229 * This doesn't cause further scanning of transitive references, it just 229 * This doesn't cause further scanning of transitive references, it just
230 * calls the client closure. */ 230 * calls the client closure. */
231 231
232static Res RootsWalkFix(ScanState ss, Ref *refIO) 232static Res RootsWalkFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
233{ 233{
234 rootsStepClosure rsc; 234 rootsStepClosure rsc;
235 Ref ref; 235 Ref ref;
236 Seg seg; 236
237 Arena arena; 237 UNUSED(pool);
238 238
239 AVERT(ScanState, ss); 239 AVERT(ScanState, ss);
240 AVER(refIO != NULL); 240 AVER(refIO != NULL);
241 rsc = ScanState2rootsStepClosure(ss); 241 rsc = ScanState2rootsStepClosure(ss);
242 AVERT(rootsStepClosure, rsc); 242 AVERT(rootsStepClosure, rsc);
243 243
244 arena = ss->arena;
245 ref = *refIO; 244 ref = *refIO;
246 245
247 /* Check that the reference is to a valid segment */ 246 /* If the segment isn't GCable then the ref is not to the heap and */
248 if (SegOfAddr(&seg, arena, ref)) { 247 /* shouldn't be passed to the client. */
249 /* Test if the segment belongs to a GCable pool */ 248 AVER((SegPool(seg)->class->attr & AttrGC) != 0);
250 /* If it isn't then it's not in the heap, and the reference */
251 /* shouldn't be passed to the client */
252 if ((SegPool(seg)->class->attr & AttrGC) != 0) {
253 /* Call the client closure - .assume.rootaddr */
254 rsc->f((mps_addr_t*)refIO, (mps_root_t)rsc->root, rsc->p, rsc->s);
255 }
256 } else {
257 /* See <design/trace/#exact.legal> */
258 AVER(ss->rank < RankEXACT || !ArenaIsReservedAddr(arena, ref));
259 }
260 249
261 /* See <design/trace/#fix.fixed.all> */ 250 /* Call the client closure - .assume.rootaddr */
262 ss->fixedSummary = RefSetAdd(ss->arena, ss->fixedSummary, *refIO); 251 rsc->f((mps_addr_t*)refIO, (mps_root_t)rsc->root, rsc->p, rsc->s);
263 252
264 AVER(ref == *refIO); /* can walk object graph - but not modify it */ 253 AVER(ref == *refIO); /* can walk object graph - but not modify it */
265 254
@@ -298,6 +287,7 @@ static Res ArenaRootsWalk(Globals arenaGlobals, mps_roots_stepper_t f,
298 ScanState ss; 287 ScanState ss;
299 Rank rank; 288 Rank rank;
300 Res res; 289 Res res;
290 Seg seg;
301 291
302 AVERT(Globals, arenaGlobals); 292 AVERT(Globals, arenaGlobals);
303 AVER(FUNCHECK(f)); 293 AVER(FUNCHECK(f));
@@ -314,9 +304,19 @@ static Res ArenaRootsWalk(Globals arenaGlobals, mps_roots_stepper_t f,
314 /* Have to fail if no trace available. Unlikely due to .assume.parked. */ 304 /* Have to fail if no trace available. Unlikely due to .assume.parked. */
315 if (res != ResOK) 305 if (res != ResOK)
316 return res; 306 return res;
317 /* Set the white set to universal so that the scanner */ 307
318 /* doesn't filter out any references from roots into the arena. */ 308 /* ArenaRootsWalk only passes references to GCable pools to the client. */
319 trace->white = ZoneSetUNIV; 309 /* NOTE: I'm not sure why this is. RB 2012-07-24 */
310 if (SegFirst(&seg, arena)) {
311 Addr base;
312 do {
313 base = SegBase(seg);
314 if ((SegPool(seg)->class->attr & AttrGC) != 0) {
315 TraceAddWhite(trace, seg);
316 }
317 } while (SegNext(&seg, arena, base));
318 }
319
320 /* Make the roots grey so that they are scanned */ 320 /* Make the roots grey so that they are scanned */
321 res = RootsIterate(arenaGlobals, (RootIterateFn)RootGrey, (void *)trace); 321 res = RootsIterate(arenaGlobals, (RootIterateFn)RootGrey, (void *)trace);
322 /* Make this trace look like any other trace. */ 322 /* Make this trace look like any other trace. */
@@ -337,6 +337,7 @@ static Res ArenaRootsWalk(Globals arenaGlobals, mps_roots_stepper_t f,
337 /* Make this trace look like any other finished trace. */ 337 /* Make this trace look like any other finished trace. */
338 trace->state = TraceFINISHED; 338 trace->state = TraceFINISHED;
339 TraceDestroy(trace); 339 TraceDestroy(trace);
340 AVER(!ArenaEmergency(arena)); /* There was no allocation. */
340 341
341 return res; 342 return res;
342} 343}