diff options
Diffstat (limited to 'mps/code/buffer.c')
| -rw-r--r-- | mps/code/buffer.c | 1526 |
1 files changed, 1526 insertions, 0 deletions
diff --git a/mps/code/buffer.c b/mps/code/buffer.c new file mode 100644 index 00000000000..22b4cda9485 --- /dev/null +++ b/mps/code/buffer.c | |||
| @@ -0,0 +1,1526 @@ | |||
| 1 | /* impl.c.buffer: ALLOCATION BUFFER IMPLEMENTATION | ||
| 2 | * | ||
| 3 | * $HopeName: MMsrc!buffer.c(trunk.63) $ | ||
| 4 | * Copyright (C) 2000 Harlequin Limited. All rights reserved. | ||
| 5 | * | ||
| 6 | * .purpose: This is (part of) the implementation of allocation buffers. | ||
| 7 | * Several macros which also form part of the implementation are in | ||
| 8 | * impl.h.mps. Several macros forming part of impl.h.mps should be | ||
| 9 | * consistent with the macros and functions in this module. | ||
| 10 | * | ||
| 11 | * DESIGN | ||
| 12 | * | ||
| 13 | * .design: See design.mps.buffer. | ||
| 14 | * | ||
| 15 | * .ap.async: The mutator is allowed to change certain AP fields | ||
| 16 | * asynchronously. Functions that can be called on buffers not | ||
| 17 | * synchronized with the mutator must take care when reading these | ||
| 18 | * fields. Such functions are marked with this tag. | ||
| 19 | * | ||
| 20 | * TRANSGRESSIONS | ||
| 21 | * | ||
| 22 | * .trans.mod: There are several instances where pool structures are | ||
| 23 | * directly accessed by this module because impl.c.pool does not provide | ||
| 24 | * an adequate (or adequately documented) interface. They bear this | ||
| 25 | * tag. */ | ||
| 26 | |||
| 27 | #include "mpm.h" | ||
| 28 | |||
| 29 | SRCID(buffer, "$HopeName: MMsrc!buffer.c(trunk.63) $"); | ||
| 30 | |||
| 31 | |||
| 32 | /* forward declarations */ | ||
| 33 | static void BufferFrameNotifyPopPending(Buffer buffer); | ||
| 34 | |||
| 35 | |||
/* BufferCheck -- check consistency of a buffer
 *
 * Validates the invariants of a Buffer structure, returning TRUE if
 * all checks pass (the CHECK* macros handle failure according to the
 * build variety).
 *
 * See .ap.async: the mutator may update some AP fields (init, alloc,
 * limit, frameptr) asynchronously, so those fields are checked only
 * via snapshot reads and deliberately weak conditions. */

Bool BufferCheck(Buffer buffer)
{
  CHECKS(Buffer, buffer);
  CHECKL(buffer->serial < buffer->pool->bufferSerial); /* .trans.mod */
  CHECKU(Arena, buffer->arena);
  CHECKU(Pool, buffer->pool);
  CHECKL(buffer->arena == buffer->pool->arena);
  CHECKL(RingCheck(&buffer->poolRing)); /* design.mps.check.type.no-sig */
  CHECKL(BoolCheck(buffer->isMutator));
  /* fillSize/emptySize are cumulative byte counters kept as doubles. */
  CHECKL(buffer->fillSize >= 0.0);
  CHECKL(buffer->emptySize >= 0.0);
  CHECKL(buffer->emptySize <= buffer->fillSize);
  CHECKL(buffer->alignment == buffer->pool->alignment);
  CHECKL(AlignCheck(buffer->alignment));
  CHECKL(BoolCheck(buffer->apStruct.enabled));

  if (buffer->apStruct.enabled) {
    /* no useful check for frameptr - mutator may be updating it */
    CHECKL(BoolCheck(buffer->apStruct.lwPopPending));
  } else {
    /* With lightweight frames disabled both frame fields must be clear. */
    CHECKL(buffer->apStruct.lwPopPending == FALSE);
    CHECKL(buffer->apStruct.frameptr == NULL);
  }

  /* If any of the buffer's fields indicate that it is reset, make */
  /* sure it is really reset. Otherwise, check various properties */
  /* of the non-reset fields. */
  if (buffer->mode & BufferModeTRANSITION) {
    /* The buffer is partway through attach/detach: nothing to check. */
  } else if ((buffer->mode & BufferModeATTACHED) == 0
             || buffer->base == (Addr)0
             || buffer->apStruct.init == (Addr)0
             || buffer->apStruct.alloc == (Addr)0
             || buffer->poolLimit == (Addr)0) {
    /* Any one reset indicator implies the buffer is fully reset. */
    CHECKL((buffer->mode & BufferModeATTACHED) == 0);
    CHECKL(buffer->base == (Addr)0);
    CHECKL(buffer->initAtFlip == (Addr)0);
    CHECKL(buffer->apStruct.init == (Addr)0);
    CHECKL(buffer->apStruct.alloc == (Addr)0);
    CHECKL(buffer->apStruct.limit == (Addr)0);
    /* Nothing reliable to check for lightweight frame state */
    CHECKL(buffer->poolLimit == (Addr)0);
  } else {
    Addr aplimit;

    /* The buffer is attached to a region of memory. */
    /* Check consistency. */
    CHECKL(buffer->mode & BufferModeATTACHED);

    /* These fields should obey the ordering */
    /* base <= init <= alloc <= poolLimit */
    CHECKL(buffer->base <= buffer->apStruct.init);
    CHECKL(buffer->apStruct.init <= buffer->apStruct.alloc);
    CHECKL(buffer->apStruct.alloc <= buffer->poolLimit);

    /* Check that the fields are aligned to the buffer alignment. */
    CHECKL(AddrIsAligned(buffer->base, buffer->alignment));
    CHECKL(AddrIsAligned(buffer->initAtFlip, buffer->alignment));
    CHECKL(AddrIsAligned(buffer->apStruct.init, buffer->alignment));
    CHECKL(AddrIsAligned(buffer->apStruct.alloc, buffer->alignment));
    CHECKL(AddrIsAligned(buffer->apStruct.limit, buffer->alignment));
    CHECKL(AddrIsAligned(buffer->poolLimit, buffer->alignment));

    /* .lwcheck: If LW frames are enabled, the buffer may become */
    /* trapped asynchronously. It can't become untrapped */
    /* asynchronously, though. See design.mps.alloc-frame.lw-frame.pop. */
    /* Read a snapshot value of the limit field. Use this to determine */
    /* if we are trapped, and to permit more useful checking when not */
    /* yet trapped. */
    aplimit = buffer->apStruct.limit;

    /* If the buffer isn't trapped then "limit" should be the limit */
    /* set by the owning pool. Otherwise, "init" is either at the */
    /* same place it was at flip (.commit.before) or has been set */
    /* to "alloc" (.commit.after). Also, when the buffer is */
    /* flipped, initAtFlip should hold the init at flip, which is */
    /* between the base and current init. Otherwise, initAtFlip */
    /* is kept at zero to avoid misuse (see */
    /* request.dylan.170429.sol.zero). */

    if ((buffer->apStruct.enabled && aplimit == (Addr)0) /* see .lwcheck */
        || (!buffer->apStruct.enabled && BufferIsTrapped(buffer))) {
      /* .check.use-trapped: This checking function uses BufferIsTrapped, */
      /* so BufferIsTrapped can't do checking as that would cause an */
      /* infinite loop. */
      CHECKL(aplimit == (Addr)0);
      if (buffer->mode & BufferModeFLIPPED) {
        CHECKL(buffer->apStruct.init == buffer->initAtFlip
               || buffer->apStruct.init == buffer->apStruct.alloc);
        CHECKL(buffer->base <= buffer->initAtFlip);
        CHECKL(buffer->initAtFlip <= buffer->apStruct.init);
      }
      /* Nothing special to check in the logged mode. */
    } else {
      CHECKL(aplimit == buffer->poolLimit); /* see .lwcheck */
      CHECKL(buffer->initAtFlip == (Addr)0);
    }
  }

  return TRUE;
}
| 141 | |||
| 142 | |||
/* BufferDescribe -- write out description of buffer
 *
 * Writes a human-readable description of the buffer to stream,
 * delegating class-specific fields to the buffer class's describe
 * method.  Returns ResFAIL on an invalid buffer or NULL stream,
 * otherwise the result of the last WriteF.
 *
 * See impl.h.mpmst for structure definitions. */

Res BufferDescribe(Buffer buffer, mps_lib_FILE *stream)
{
  Res res;

  /* Use CHECKT rather than AVERT: describe must work on a possibly */
  /* corrupt buffer without aborting. */
  if (!CHECKT(Buffer, buffer)) return ResFAIL;
  if (stream == NULL) return ResFAIL;

  res = WriteF(stream,
               "Buffer $P ($U) {\n",
               (WriteFP)buffer, (WriteFU)buffer->serial,
               "  class $P (\"$S\")\n",
               (WriteFP)buffer->class, buffer->class->name,
               "  Arena $P\n",       (WriteFP)buffer->arena,
               "  Pool $P\n",        (WriteFP)buffer->pool,
               buffer->isMutator ?
                 "  Mutator Buffer\n" : "  Internal Buffer\n",
               "  mode $B\n",    (WriteFB)(buffer->mode),
               "  fillSize $UKb\n",  (WriteFU)(buffer->fillSize / 1024),
               "  emptySize $UKb\n", (WriteFU)(buffer->emptySize / 1024),
               "  alignment $W\n",   (WriteFW)buffer->alignment,
               "  base $A\n",        buffer->base,
               "  initAtFlip $A\n",  buffer->initAtFlip,
               "  init $A\n",        buffer->apStruct.init,
               "  alloc $A\n",       buffer->apStruct.alloc,
               "  limit $A\n",       buffer->apStruct.limit,
               "  poolLimit $A\n",   buffer->poolLimit,
               NULL);
  if (res != ResOK) return res;

  /* Class-specific description (e.g. seg buffers describe their seg). */
  res = buffer->class->describe(buffer, stream);
  if (res != ResOK) return res;

  res = WriteF(stream, "} Buffer $P ($U)\n",
               (WriteFP)buffer, (WriteFU)buffer->serial,
               NULL);
  return res;
}
| 184 | |||
| 185 | |||
/* BufferInitV -- initialize an allocation buffer
 *
 * Fills in the generic fields of an already-allocated buffer
 * descriptor, signs it, then dispatches to the class init method and
 * finally attaches the buffer to the pool's buffer ring.  On class
 * init failure the buffer is unsigned and left unattached.
 *
 * NOTE(review): the field-initialization order below is significant —
 * the signature and serial are set last (.init.sig-serial) so that
 * AVERT(Buffer, ...) is valid inside subclass init methods. */

static Res BufferInitV(Buffer buffer, BufferClass class,
                       Pool pool, Bool isMutator, va_list args)
{
  Arena arena;
  Res res;

  AVER(buffer != NULL);
  AVERT(BufferClass, class);
  AVERT(Pool, pool);
  /* The PoolClass should support buffer protocols */
  AVER((pool->class->attr & AttrBUF)); /* .trans.mod */

  arena = PoolArena(pool);
  /* Initialize the buffer. See impl.h.mpmst for a definition of */
  /* the structure. sig and serial comes later .init.sig-serial */
  buffer->arena = arena;
  buffer->class = class;
  buffer->pool = pool;
  RingInit(&buffer->poolRing);
  buffer->isMutator = isMutator;
  /* A logged buffer keeps apStruct.limit at zero so every commit */
  /* traps and can be recorded. */
  if (ArenaGlobals(arena)->bufferLogging) {
    buffer->mode = BufferModeLOGGED;
  } else {
    buffer->mode = 0;
  }
  buffer->fillSize = 0.0;
  buffer->emptySize = 0.0;
  buffer->alignment = pool->alignment; /* .trans.mod */
  buffer->base = (Addr)0;
  buffer->initAtFlip = (Addr)0;
  buffer->apStruct.init = (Addr)0;
  buffer->apStruct.alloc = (Addr)0;
  buffer->apStruct.limit = (Addr)0;
  buffer->apStruct.frameptr = NULL;
  buffer->apStruct.enabled = FALSE;
  buffer->apStruct.lwPopPending = FALSE;
  buffer->poolLimit = (Addr)0;
  buffer->rampCount = 0;

  /* .init.sig-serial: Now the vanilla stuff is initialized, */
  /* sign the buffer and give it a serial number. It can */
  /* then be safely checked in subclass methods. */
  buffer->sig = BufferSig;
  buffer->serial = pool->bufferSerial; /* .trans.mod */
  ++pool->bufferSerial;
  AVERT(Buffer, buffer);

  /* Dispatch to the buffer class method to perform any */
  /* class-specific initialization of the buffer. */
  res = (*class->init)(buffer, pool, args);
  if (res != ResOK)
    goto failInit;

  /* Attach the initialized buffer to the pool. */
  RingAppend(&pool->bufferRing, &buffer->poolRing);

  return ResOK;

failInit:
  /* Undo the generic initialization so the caller can free the memory. */
  RingFinish(&buffer->poolRing);
  buffer->sig = SigInvalid;
  return res;
}
| 251 | |||
| 252 | |||
| 253 | /* BufferCreate -- create an allocation buffer | ||
| 254 | * | ||
| 255 | * See design.mps.buffer.method.create. */ | ||
| 256 | |||
| 257 | Res BufferCreate(Buffer *bufferReturn, BufferClass class, | ||
| 258 | Pool pool, Bool isMutator, ...) | ||
| 259 | { | ||
| 260 | Res res; | ||
| 261 | va_list args; | ||
| 262 | |||
| 263 | va_start(args, isMutator); | ||
| 264 | res = BufferCreateV(bufferReturn, class, pool, isMutator, args); | ||
| 265 | va_end(args); | ||
| 266 | return res; | ||
| 267 | } | ||
| 268 | |||
| 269 | |||
/* BufferCreateV -- create an allocation buffer, with varargs
 *
 * Allocates a buffer descriptor of the class's declared size from the
 * arena's control pool, then initializes it with BufferInitV.  On
 * success the new buffer is returned via *bufferReturn; on failure the
 * descriptor memory is freed and the error result returned.
 *
 * See design.mps.buffer.method.create. */

Res BufferCreateV(Buffer *bufferReturn, BufferClass class,
                  Pool pool, Bool isMutator, va_list args)
{
  Res res;
  Buffer buffer;
  Arena arena;
  void *p;

  AVER(bufferReturn != NULL);
  AVERT(BufferClass, class);
  AVERT(Pool, pool);

  arena = PoolArena(pool);

  /* Allocate memory for the buffer descriptor structure. */
  /* class->size is the full size of the (possibly subclassed) struct. */
  res = ControlAlloc(&p, arena, class->size,
                     /* withReservoirPermit */ FALSE);
  if (res != ResOK)
    goto failAlloc;
  buffer = p;

  /* Initialize the buffer descriptor structure. */
  res = BufferInitV(buffer, class, pool, isMutator, args);
  if (res != ResOK)
    goto failInit;

  *bufferReturn = buffer;
  return ResOK;

failInit:
  ControlFree(arena, buffer, class->size);
failAlloc:
  return res;
}
| 308 | |||
| 309 | |||
/* BufferDetach -- detach a buffer from a region
 *
 * If the buffer is attached, notifies the owning pool (bufferEmpty)
 * and the buffer class (detach), updates the empty/alloc accounting,
 * and resets all region pointers to zero.  A no-op on a reset buffer.
 * The buffer must be "ready" (no reserve outstanding). */

void BufferDetach(Buffer buffer, Pool pool)
{
  AVERT(Buffer, buffer);
  AVER(BufferIsReady(buffer));

  if (!BufferIsReset(buffer)) {
    Addr init, limit;
    Size spare;

    /* TRANSITION suppresses the attached-state invariants while the */
    /* fields are being torn down (see BufferCheck). */
    buffer->mode |= BufferModeTRANSITION;
    init = buffer->apStruct.init;
    limit = buffer->poolLimit;
    /* Ask the owning pool to do whatever it needs to before the */
    /* buffer is detached (e.g. copy buffer state into pool state). */
    (*pool->class->bufferEmpty)(pool, buffer, init, limit);
    /* Use of lightweight frames must have been disabled by now */
    AVER(BufferFrameState(buffer) == BufferFrameDISABLED);

    /* run any class-specific detachment method */
    buffer->class->detach(buffer);

    /* "spare" is the unused tail of the region being returned. */
    spare = AddrOffset(init, limit);
    buffer->emptySize += spare;
    if (buffer->isMutator) {
      buffer->pool->emptyMutatorSize += spare;
      ArenaGlobals(buffer->arena)->emptyMutatorSize += spare;
      ArenaGlobals(buffer->arena)->allocMutatorSize +=
        AddrOffset(buffer->base, init);
    } else {
      buffer->pool->emptyInternalSize += spare;
      ArenaGlobals(buffer->arena)->emptyInternalSize += spare;
    }

    /* Reset the buffer. */
    buffer->base = (Addr)0;
    buffer->initAtFlip = (Addr)0;
    buffer->apStruct.init = (Addr)0;
    buffer->apStruct.alloc = (Addr)0;
    buffer->apStruct.limit = (Addr)0;
    buffer->poolLimit = (Addr)0;
    buffer->mode &=
      ~(BufferModeATTACHED|BufferModeFLIPPED|BufferModeTRANSITION);
    BufferFrameSetState(buffer, BufferFrameDISABLED);

    EVENT_PW(BufferEmpty, buffer, spare);
  }
}
| 359 | |||
| 360 | |||
| 361 | /* BufferDestroy -- destroy an allocation buffer | ||
| 362 | * | ||
| 363 | * See design.mps.buffer.method.destroy. */ | ||
| 364 | |||
| 365 | void BufferDestroy(Buffer buffer) | ||
| 366 | { | ||
| 367 | Arena arena; | ||
| 368 | BufferClass class; | ||
| 369 | |||
| 370 | AVERT(Buffer, buffer); | ||
| 371 | arena = buffer->arena; | ||
| 372 | class = buffer->class; | ||
| 373 | AVERT(BufferClass, class); | ||
| 374 | BufferFinish(buffer); | ||
| 375 | ControlFree(arena, buffer, class->size); | ||
| 376 | } | ||
| 377 | |||
| 378 | |||
/* BufferFinish -- finish an allocation buffer
 *
 * Processes any deferred lightweight-frame pop, detaches the buffer
 * from its region, runs the class finish method, removes the buffer
 * from the pool's ring, and invalidates the signature.  Does not free
 * the descriptor memory (see BufferDestroy). */

void BufferFinish(Buffer buffer)
{
  Pool pool;

  AVERT(Buffer, buffer);

  pool = BufferPool(buffer);

  /* The PoolClass should support buffer protocols */
  AVER((pool->class->attr & AttrBUF)); /* .trans.mod */
  AVER(BufferIsReady(buffer));

  /* design.mps.alloc-frame.lw-frame.sync.trip */
  /* A mutator-trapped buffer has a pending pop that must be */
  /* delivered to the pool before detaching. */
  if (BufferIsTrappedByMutator(buffer)) {
    BufferFrameNotifyPopPending(buffer);
  }

  BufferDetach(buffer, pool);

  /* Dispatch to the buffer class method to perform any */
  /* class-specific finishing of the buffer. */
  (*buffer->class->finish)(buffer);

  /* Detach the buffer from its owning pool and unsig it. */
  RingRemove(&buffer->poolRing);
  buffer->sig = SigInvalid;

  /* Finish off the generic buffer fields. */
  RingFinish(&buffer->poolRing);

  EVENT_P(BufferFinish, buffer);
}
| 413 | |||
| 414 | |||
| 415 | /* BufferIsReset -- test whether a buffer is in the "reset" state | ||
| 416 | * | ||
| 417 | * A buffer is "reset" when it is not attached. In this state all of | ||
| 418 | * the pointers into the region are zero. This condition is checked by | ||
| 419 | * BufferCheck. */ | ||
| 420 | |||
| 421 | Bool BufferIsReset(Buffer buffer) | ||
| 422 | { | ||
| 423 | AVERT(Buffer, buffer); | ||
| 424 | |||
| 425 | return !(buffer->mode & BufferModeATTACHED); | ||
| 426 | } | ||
| 427 | |||
| 428 | |||
| 429 | /* BufferIsReady -- test whether a buffer is ready for reserve | ||
| 430 | * | ||
| 431 | * BufferIsReady returns TRUE if and only if the buffer is not between a | ||
| 432 | * reserve and commit. The result is only reliable if the client is not | ||
| 433 | * currently using the buffer, since it may update the alloc and init | ||
| 434 | * pointers asynchronously. */ | ||
| 435 | |||
| 436 | Bool BufferIsReady(Buffer buffer) | ||
| 437 | { | ||
| 438 | AVERT(Buffer, buffer); | ||
| 439 | |||
| 440 | return buffer->apStruct.init == buffer->apStruct.alloc; | ||
| 441 | } | ||
| 442 | |||
| 443 | |||
| 444 | /* BufferIsMutator -- test whether buffer belongs to mutator | ||
| 445 | * | ||
| 446 | * Returns TRUE iff mutator was created for the mutator. */ | ||
| 447 | |||
| 448 | Bool BufferIsMutator(Buffer buffer) | ||
| 449 | { | ||
| 450 | AVERT(Buffer, buffer); | ||
| 451 | |||
| 452 | return buffer->isMutator; | ||
| 453 | } | ||
| 454 | |||
| 455 | |||
/* BufferSetUnflipped
 *
 * Unflip a buffer if it was flipped: clears the FLIPPED mode bit,
 * restores the AP limit if no other trap reason remains, and zeroes
 * initAtFlip (kept at zero when unflipped — see
 * request.dylan.170429.sol.zero).
 *
 * NOTE(review): the order matters — FLIPPED must be cleared before
 * BufferIsTrapped is consulted, so that the flip itself no longer
 * counts as a trap reason. */

static void BufferSetUnflipped(Buffer buffer)
{
  AVERT(Buffer, buffer);
  AVER(buffer->mode & BufferModeFLIPPED);
  buffer->mode &= ~BufferModeFLIPPED;
  /* restore apStruct.limit if appropriate */
  if (!BufferIsTrapped(buffer)) {
    buffer->apStruct.limit = buffer->poolLimit;
  }
  buffer->initAtFlip = (Addr)0;
}
| 471 | |||
| 472 | |||
| 473 | /* BufferFrameState | ||
| 474 | * | ||
| 475 | * Returns the frame state of a buffer. See | ||
| 476 | * design.mps.alloc-frame.lw-frame.states. */ | ||
| 477 | |||
| 478 | FrameState BufferFrameState(Buffer buffer) | ||
| 479 | { | ||
| 480 | AVERT(Buffer, buffer); | ||
| 481 | if (buffer->apStruct.enabled) { | ||
| 482 | if (buffer->apStruct.lwPopPending) { | ||
| 483 | return BufferFramePOP_PENDING; | ||
| 484 | } else { | ||
| 485 | AVER(buffer->apStruct.frameptr == NULL); | ||
| 486 | return BufferFrameVALID; | ||
| 487 | } | ||
| 488 | } else { | ||
| 489 | AVER(buffer->apStruct.frameptr == NULL); | ||
| 490 | AVER(buffer->apStruct.lwPopPending == FALSE); | ||
| 491 | return BufferFrameDISABLED; | ||
| 492 | } | ||
| 493 | } | ||
| 494 | |||
| 495 | |||
| 496 | /* BufferFrameSetState | ||
| 497 | * | ||
| 498 | * Sets the frame state of a buffer. Only the mutator may set the | ||
| 499 | * PopPending state. See design.mps.alloc-frame.lw-frame.states. */ | ||
| 500 | |||
| 501 | void BufferFrameSetState(Buffer buffer, FrameState state) | ||
| 502 | { | ||
| 503 | AVERT(Buffer, buffer); | ||
| 504 | AVER(state == BufferFrameVALID || state == BufferFrameDISABLED); | ||
| 505 | buffer->apStruct.frameptr = NULL; | ||
| 506 | buffer->apStruct.lwPopPending = FALSE; | ||
| 507 | buffer->apStruct.enabled = (state == BufferFrameVALID); | ||
| 508 | } | ||
| 509 | |||
| 510 | |||
| 511 | /* BufferSetAllocAddr | ||
| 512 | * | ||
| 513 | * Sets the init & alloc pointers of a buffer. */ | ||
| 514 | |||
| 515 | void BufferSetAllocAddr(Buffer buffer, Addr addr) | ||
| 516 | { | ||
| 517 | AVERT(Buffer, buffer); | ||
| 518 | /* Can't check Addr */ | ||
| 519 | AVER(BufferIsReady(buffer)); | ||
| 520 | AVER(buffer->base <= addr); | ||
| 521 | AVER(buffer->poolLimit >= addr); | ||
| 522 | |||
| 523 | buffer->apStruct.init = addr; | ||
| 524 | buffer->apStruct.alloc = addr; | ||
| 525 | } | ||
| 526 | |||
| 527 | |||
/* BufferFrameNotifyPopPending
 *
 * Notifies the pool when a lightweight frame pop operation has been
 * deferred and needs to be processed: captures the trapped frame,
 * returns the buffer to the VALID frame state, untraps the AP if no
 * other trap reason remains, and dispatches to the pool's
 * framePopPending method.  See
 * design.mps.alloc-frame.lw-frame.sync.trip. */

static void BufferFrameNotifyPopPending(Buffer buffer)
{
  AllocFrame frame;
  Pool pool;
  AVER(BufferIsTrappedByMutator(buffer));
  AVER(BufferFrameState(buffer) == BufferFramePOP_PENDING);
  /* Capture the frame before BufferFrameSetState clears frameptr. */
  frame = (AllocFrame)buffer->apStruct.frameptr;
  /* Unset PopPending state & notify the pool */
  BufferFrameSetState(buffer, BufferFrameVALID);
  /* If the frame is no longer trapped, undo the trap by resetting */
  /* the AP limit pointer */
  if (!BufferIsTrapped(buffer)) {
    buffer->apStruct.limit = buffer->poolLimit;
  }
  pool = BufferPool(buffer);
  (*pool->class->framePopPending)(pool, buffer, frame);
}
| 551 | |||
| 552 | |||
| 553 | |||
/* BufferFramePush
 *
 * Pushes a lightweight allocation frame, returning it via
 * frameReturn.  If the buffer is trapped (AP limit zero), first
 * resolves the trap reason — unflips a flipped buffer and delivers
 * any mutator-deferred pop — then dispatches to the pool's framePush
 * method.  See design.mps.alloc-frame. */

Res BufferFramePush(AllocFrame *frameReturn, Buffer buffer)
{
  Pool pool;
  AVERT(Buffer, buffer);
  AVER(frameReturn != NULL);


  /* Process any flip or PopPending */
  if (!BufferIsReset(buffer) && buffer->apStruct.limit == (Addr)0) {
    /* .fill.unflip: If the buffer is flipped then we unflip the buffer. */
    if (buffer->mode & BufferModeFLIPPED) {
      BufferSetUnflipped(buffer);
    }

    /* check for PopPending */
    if (BufferIsTrappedByMutator(buffer)) {
      BufferFrameNotifyPopPending(buffer);
    }
  }
  pool = BufferPool(buffer);
  return (*pool->class->framePush)(frameReturn, pool, buffer);
}
| 580 | |||
| 581 | |||
| 582 | /* BufferFramePop | ||
| 583 | * | ||
| 584 | * See design.mps.alloc-frame. */ | ||
| 585 | |||
| 586 | Res BufferFramePop(Buffer buffer, AllocFrame frame) | ||
| 587 | { | ||
| 588 | Pool pool; | ||
| 589 | AVERT(Buffer, buffer); | ||
| 590 | /* frame is of an abstract type & can't be checked */ | ||
| 591 | pool = BufferPool(buffer); | ||
| 592 | return (*pool->class->framePop)(pool, buffer, frame); | ||
| 593 | |||
| 594 | } | ||
| 595 | |||
| 596 | |||
| 597 | |||
/* BufferReserve -- reserve memory from an allocation buffer
 *
 * Reserves size bytes (already aligned to the pool alignment) from
 * the buffer, returning the base of the reserved block via pReturn.
 * The fast path bumps the alloc pointer; if the buffer has no room
 * (or is trapped, since a trapped buffer has limit zero), falls
 * through to BufferFill.
 *
 * .reserve: Keep in sync with impl.h.mps.reserve. */

Res BufferReserve(Addr *pReturn, Buffer buffer, Size size,
                  Bool withReservoirPermit)
{
  Addr next;

  AVER(pReturn != NULL);
  AVERT(Buffer, buffer);
  AVER(size > 0);
  AVER(SizeIsAligned(size, BufferPool(buffer)->alignment));
  AVER(BufferIsReady(buffer));
  AVER(BoolCheck(withReservoirPermit));

  /* Is there enough room in the unallocated portion of the buffer to */
  /* satisfy the request? If so, just increase the alloc marker and */
  /* return a pointer to the area below it. */
  /* The "next > alloc" test also rejects address-arithmetic overflow. */
  next = AddrAdd(buffer->apStruct.alloc, size);
  if (next > buffer->apStruct.alloc && next <= buffer->apStruct.limit) {
    buffer->apStruct.alloc = next;
    *pReturn = buffer->apStruct.init;
    return ResOK;
  }

  /* If the buffer can't accommodate the request, call "fill". */
  return BufferFill(pReturn, buffer, size, withReservoirPermit);
}
| 627 | |||
| 628 | |||
/* BufferAttach -- attach a region to a buffer
 *
 * Attaches the region [base, limit) to a reset buffer, with init
 * marking the boundary between pre-existing data and fresh space and
 * size bytes immediately reserved at init.  Updates fill accounting
 * and runs the class attach method.
 *
 * BufferAttach is entered because of a BufferFill, or because of a Pop
 * operation on a lightweight frame. */

void BufferAttach(Buffer buffer, Addr base, Addr limit,
                  Addr init, Size size)
{
  Size filled;

  AVERT(Buffer, buffer);
  AVER(BufferIsReset(buffer));
  AVER(AddrAdd(base, size) <= limit);
  AVER(base <= init);
  AVER(init <= limit);

  /* Set up the buffer to point at the supplied region */
  buffer->mode |= BufferModeATTACHED;
  buffer->base = base;
  buffer->apStruct.init = init;
  buffer->apStruct.alloc = AddrAdd(init, size);
  /* only set limit if not logged */
  /* (a logged buffer keeps limit at zero so every commit traps) */
  if ((buffer->mode & BufferModeLOGGED) == 0) {
    buffer->apStruct.limit = limit;
  } else {
    AVER(buffer->apStruct.limit == (Addr)0);
  }
  AVER(buffer->initAtFlip == (Addr)0);
  buffer->poolLimit = limit;

  /* "filled" is the usable space handed to the buffer. */
  filled = AddrOffset(init, limit);
  buffer->fillSize += filled;
  if (buffer->isMutator) {
    if (base != init) { /* see design.mps.buffer.count.alloc.how */
      Size prealloc = AddrOffset(base, init);
      ArenaGlobals(buffer->arena)->allocMutatorSize -= prealloc;
    }
    buffer->pool->fillMutatorSize += filled;
    ArenaGlobals(buffer->arena)->fillMutatorSize += filled;
  } else {
    buffer->pool->fillInternalSize += filled;
    ArenaGlobals(buffer->arena)->fillInternalSize += filled;
  }

  /* run any class-specific attachment method */
  buffer->class->attach(buffer, base, limit, init, size);

  AVERT(Buffer, buffer);
  EVENT_PWAW(BufferFill, buffer, size, base, filled);
}
| 679 | |||
| 680 | |||
/* BufferFill -- refill an empty buffer
 *
 * BufferFill is entered by the "reserve" operation on a buffer if there
 * isn't enough room between "alloc" and "limit" to satisfy an
 * allocation request.  This might be because the buffer has been
 * trapped and "limit" has been set to zero; in that case the trap is
 * resolved (unflip, deliver deferred pop) and the allocation retried
 * against the true pool limit before the buffer is detached and the
 * pool asked for fresh memory. */

Res BufferFill(Addr *pReturn, Buffer buffer, Size size,
               Bool withReservoirPermit)
{
  Res res;
  Pool pool;
  Addr base, limit, next;

  AVER(pReturn != NULL);
  AVERT(Buffer, buffer);
  AVER(size > 0);
  AVER(SizeIsAligned(size, BufferPool(buffer)->alignment));
  AVER(BufferIsReady(buffer));

  pool = BufferPool(buffer);

  /* If we're here because the buffer was trapped, then we attempt */
  /* the allocation here. */
  if (!BufferIsReset(buffer) && buffer->apStruct.limit == (Addr)0) {
    /* .fill.unflip: If the buffer is flipped then we unflip the buffer. */
    if (buffer->mode & BufferModeFLIPPED) {
      BufferSetUnflipped(buffer);
    }

    /* design.mps.alloc-frame.lw-frame.sync.trip */
    if (BufferIsTrappedByMutator(buffer)) {
      BufferFrameNotifyPopPending(buffer);
    }

    /* .fill.logged: If the buffer is logged then we leave it logged. */
    /* Retry against poolLimit: the AP limit may still be zero (logged */
    /* buffers), but the region itself may have room. */
    next = AddrAdd(buffer->apStruct.alloc, size);
    if (next > buffer->apStruct.alloc &&
        next <= buffer->poolLimit) {
      buffer->apStruct.alloc = next;
      if (buffer->mode & BufferModeLOGGED) {
        EVENT_PAW(BufferReserve, buffer, buffer->apStruct.init, size);
      }
      *pReturn = buffer->apStruct.init;
      return ResOK;
    }
  }

  /* There really isn't enough room for the allocation now. */
  /* (The second disjunct covers address-arithmetic overflow.) */
  AVER(AddrAdd(buffer->apStruct.alloc, size) > buffer->poolLimit
       || AddrAdd(buffer->apStruct.alloc, size) < buffer->apStruct.alloc);

  BufferDetach(buffer, pool);

  /* Ask the pool for some memory. */
  res = (*pool->class->bufferFill)(&base, &limit,
                                   pool, buffer, size,
                                   withReservoirPermit);
  if (res != ResOK)
    return res;

  /* Set up the buffer to point at the memory given by the pool */
  /* and do the allocation that was requested by the client. */
  BufferAttach(buffer, base, limit, base, size);

  if (buffer->mode & BufferModeLOGGED) {
    EVENT_PAW(BufferReserve, buffer, buffer->apStruct.init, size);
  }

  *pReturn = base;
  return res;
}
| 753 | |||
| 754 | |||
| 755 | |||
/* BufferCommit -- commit memory previously reserved
 *
 * Second half of the two-phase reserve/commit allocation protocol:
 * declares that the object at p (size bytes, previously reserved) is
 * now fully initialized.  Returns TRUE on success; returns FALSE (via
 * BufferTrip) if a flip invalidated the object, in which case the
 * mutator must retry the allocation.
 *
 * .commit: Keep in sync with impl.h.mps.commit. */

Bool BufferCommit(Buffer buffer, Addr p, Size size)
{
  AVERT(Buffer, buffer);
  AVER(size > 0);
  AVER(SizeIsAligned(size, BufferPool(buffer)->alignment));
  AVER(!BufferIsReady(buffer));

  /* See design.mps.collection.flip. */
  /* .commit.before: If a flip occurs before this point, when the */
  /* pool reads "initAtFlip" it will point below the object, so it */
  /* will be trashed and the commit must fail when trip is called. */
  AVER(p == buffer->apStruct.init);
  AVER(AddrAdd(buffer->apStruct.init, size) == buffer->apStruct.alloc);

  /* .commit.update: Atomically update the init pointer to declare */
  /* that the object is initialized (though it may be invalid if a */
  /* flip occurred). */
  buffer->apStruct.init = buffer->apStruct.alloc;

  /* .improve.memory-barrier: Memory barrier here on the DEC Alpha */
  /* (and other relaxed memory order architectures). */
  /* .commit.after: If a flip occurs at this point, the pool will */
  /* see "initAtFlip" above the object, which is valid, so it will */
  /* be collected.  The commit must succeed when trip is called. */
  /* The pointer "p" will have been fixed up.  (@@@@ Will it?) */
  /* .commit.trip: Trip the buffer if a flip has occurred.  Any trap */
  /* zeroes the limit field (see BufferFlip), forcing this slow path. */
  if (buffer->apStruct.limit == 0)
    return BufferTrip(buffer, p, size);

  /* No flip occurred, so succeed. */

  return TRUE;
}
| 793 | |||
| 794 | |||
/* BufferTrip -- act on a trapped buffer
 *
 * Called from BufferCommit (and its equivalents) when invoked on a
 * trapped buffer (indicated by limit == 0).  This function can decide
 * whether to succeed or fail the commit: it returns TRUE to succeed,
 * or FALSE after undoing the reservation (.trip.unflip) so that the
 * mutator retries the allocation. */

Bool BufferTrip(Buffer buffer, Addr p, Size size)
{
  Pool pool;

  AVERT(Buffer, buffer);
  AVER(p != 0);
  AVER(size > 0);
  AVER(SizeIsAligned(size, buffer->alignment));

  /* The limit field should be zero, because that's how trip gets */
  /* called.  See .commit.trip. */
  AVER(buffer->apStruct.limit == 0);
  /* Of course we should be trapped. */
  AVER(BufferIsTrapped(buffer));
  /* But the mutator shouldn't have caused the trap */
  AVER(!BufferIsTrappedByMutator(buffer));

  /* The init and alloc fields should be equal at this point, because */
  /* the step .commit.update has happened. */
  AVER(buffer->apStruct.init == buffer->apStruct.alloc);

  /* The p parameter points at the base address of the allocated */
  /* block, the end of which should now coincide with the init and */
  /* alloc fields. */
  /* Note that we don't _really_ care about p too much.  We don't */
  /* do anything else with it apart from these checks.  (In */
  /* particular it seems like the algorithms could be modified to */
  /* cope with the case of the object having been copied between */
  /* Commit updating init and testing limit.) */
  AVER(AddrAdd(p, size) == buffer->apStruct.init);

  pool = BufferPool(buffer);

  AVER(PoolHasAddr(pool, p));

  /* .trip.unflip: If the flip occurred before commit set "init" */
  /* to "alloc" (see .commit.before) then the object is invalid */
  /* (won't've been scanned) so undo the allocation and fail commit. */
  /* Otherwise (see .commit.after) the object is valid (will've been */
  /* scanned) so commit can simply succeed. */
  if ((buffer->mode & BufferModeFLIPPED)
      && buffer->apStruct.init != buffer->initAtFlip) {
    /* Reset just enough state for Reserve/Fill to work. */
    /* The buffer is left trapped and we leave the untrapping */
    /* for the next reserve (which goes out of line to Fill */
    /* (.fill.unflip) because the buffer is still trapped) */
    buffer->apStruct.init = p;
    buffer->apStruct.alloc = p;
    return FALSE;
  }

  /* Emit event, including the client class, if logged */
  if (buffer->mode & BufferModeLOGGED) {
    Bool b;
    Format format;
    Addr clientClass;

    /* The pool may have no format, in which case no class is known. */
    b = PoolFormat(&format, buffer->pool);
    if (b) {
      clientClass = format->class(p);
    } else {
      clientClass = (Addr)0;
    }
    EVENT_PAWA(BufferCommit, buffer, p, size, clientClass);
    /* Of course, it's not _really_ unused unless you're not */
    /* using telemetry.  This is a HACK @@@@.  It should be */
    /* removed when telemetry is fixed to use its arguments. */
    UNUSED(clientClass);
  }
  return TRUE;
}
| 872 | |||
| 873 | |||
/* BufferFlip -- trap buffer at GC flip time
 *
 * .flip: Tells the buffer that a flip has occurred.  If the buffer is
 * between reserve and commit, and has a rank (i.e. references), and has
 * the two-phase protocol, then the object being initialized is
 * invalidated by failing the next commit.  The buffer code handles this
 * automatically (ie the pool implementation is not involved).  If the
 * buffer is reset there is no effect, since there is no object to
 * invalidate.  If the buffer is already flipped there is no effect,
 * since the object is already invalid by a previous trace.  The buffer
 * becomes unflipped at the next reserve or commit operation (actually
 * reserve because commit is lazy).  This is handled by BufferFill
 * (.fill.unflip) or BufferTrip (.trip.unflip). */

void BufferFlip(Buffer buffer)
{
  AVERT(Buffer, buffer);

  if (BufferRankSet(buffer) != RankSetEMPTY
      && (buffer->mode & BufferModeFLIPPED) == 0
      && !BufferIsReset(buffer)) {
    AVER(buffer->initAtFlip == (Addr)0);
    /* Record how far initialization had got at flip time; */
    /* BufferScanLimit reads this while the buffer is flipped. */
    buffer->initAtFlip = buffer->apStruct.init;
    /* Memory Barrier here? @@@@ */
    /* Zeroing the limit forces the next commit out of line into */
    /* BufferTrip (see .commit.trip). */
    buffer->apStruct.limit = (Addr)0;
    buffer->mode |= BufferModeFLIPPED;
  }
}
| 902 | |||
| 903 | |||
| 904 | /* BufferScanLimit -- return limit of data to which to scan | ||
| 905 | * | ||
| 906 | * Returns the highest address to which it is safe to scan objects in | ||
| 907 | * the buffer. When the buffer is not flipped, this is the "init" of | ||
| 908 | * the AP. When the buffer is flipped, it is the value that "init" had | ||
| 909 | * at flip time. [Could make BufferScanLimit return the AP "alloc" when | ||
| 910 | * using ambiguous scanning.] See .ap.async. */ | ||
| 911 | |||
| 912 | Addr BufferScanLimit(Buffer buffer) | ||
| 913 | { | ||
| 914 | if (buffer->mode & BufferModeFLIPPED) { | ||
| 915 | return buffer->initAtFlip; | ||
| 916 | } else { | ||
| 917 | return buffer->apStruct.init; | ||
| 918 | } | ||
| 919 | } | ||
| 920 | |||
| 921 | |||
| 922 | Seg BufferSeg(Buffer buffer) | ||
| 923 | { | ||
| 924 | AVERT(Buffer, buffer); | ||
| 925 | return buffer->class->seg(buffer); | ||
| 926 | } | ||
| 927 | |||
| 928 | |||
| 929 | RankSet BufferRankSet(Buffer buffer) | ||
| 930 | { | ||
| 931 | AVERT(Buffer, buffer); | ||
| 932 | return buffer->class->rankSet(buffer); | ||
| 933 | } | ||
| 934 | |||
| 935 | void BufferSetRankSet(Buffer buffer, RankSet rankset) | ||
| 936 | { | ||
| 937 | AVERT(Buffer, buffer); | ||
| 938 | AVERT(RankSet, rankset); | ||
| 939 | buffer->class->setRankSet(buffer, rankset); | ||
| 940 | } | ||
| 941 | |||
| 942 | |||
| 943 | /* BufferReassignSeg -- adjust the seg of an attached buffer | ||
| 944 | * | ||
| 945 | * Used for segment splitting and merging. */ | ||
| 946 | |||
| 947 | void BufferReassignSeg(Buffer buffer, Seg seg) | ||
| 948 | { | ||
| 949 | AVERT(Buffer, buffer); | ||
| 950 | AVERT(Seg, seg); | ||
| 951 | AVER(!BufferIsReset(buffer)); | ||
| 952 | AVER(BufferBase(buffer) >= SegBase(seg)); | ||
| 953 | AVER(BufferLimit(buffer) <= SegLimit(seg)); | ||
| 954 | AVER(BufferPool(buffer) == SegPool(seg)); | ||
| 955 | buffer->class->reassignSeg(buffer, seg); | ||
| 956 | } | ||
| 957 | |||
| 958 | |||
| 959 | /* BufferIsTrapped | ||
| 960 | * | ||
| 961 | * Indicates whether the buffer is trapped - either by MPS or the | ||
| 962 | * mutator. See .ap.async. */ | ||
| 963 | |||
| 964 | Bool BufferIsTrapped(Buffer buffer) | ||
| 965 | { | ||
| 966 | /* Can't check buffer, see .check.use-trapped */ | ||
| 967 | return BufferIsTrappedByMutator(buffer) | ||
| 968 | || ((buffer->mode & (BufferModeFLIPPED|BufferModeLOGGED)) != 0); | ||
| 969 | } | ||
| 970 | |||
| 971 | |||
/* BufferIsTrappedByMutator
 *
 * Indicates whether the mutator trapped the buffer (a lightweight
 * frame pop is pending).  See
 * design.mps.alloc-frame.lw-frame.sync.trip and .ap.async. */

Bool BufferIsTrappedByMutator(Buffer buffer)
{
  /* lwPopPending may only be set on an enabled AP. */
  AVER(!buffer->apStruct.lwPopPending || buffer->apStruct.enabled);
  /* Can't check buffer, see .check.use-trapped */
  return buffer->apStruct.lwPopPending;
}
| 983 | |||
| 984 | |||
/* Alloc pattern functions
 *
 * The two patterns are represented by pointers to two distinct dummy
 * structures; pointer identity is the only information they carry
 * (see AllocPatternCheck and BufferRampBegin). */

AllocPatternStruct AllocPatternRampStruct = {'\0'};

/* AllocPatternRamp -- return the plain ramp allocation pattern */

AllocPattern AllocPatternRamp(void)
{
  return &AllocPatternRampStruct;
}

AllocPatternStruct AllocPatternRampCollectAllStruct = {'\0'};

/* AllocPatternRampCollectAll -- return the collect-all ramp pattern */

AllocPattern AllocPatternRampCollectAll(void)
{
  return &AllocPatternRampCollectAllStruct;
}
| 1002 | |||
| 1003 | static Bool AllocPatternCheck(AllocPattern pattern) | ||
| 1004 | { | ||
| 1005 | CHECKL(pattern == &AllocPatternRampCollectAllStruct | ||
| 1006 | || pattern == &AllocPatternRampStruct); | ||
| 1007 | UNUSED(pattern); /* impl.c.mpm.check.unused */ | ||
| 1008 | return TRUE; | ||
| 1009 | } | ||
| 1010 | |||
| 1011 | |||
| 1012 | /* BufferRampBegin -- note an entry into a ramp pattern | ||
| 1013 | * | ||
| 1014 | * .ramp.hack: We count the number of times the ap has begun ramp mode | ||
| 1015 | * (and not ended), so we can do reset by ending all the current ramps. */ | ||
| 1016 | |||
| 1017 | void BufferRampBegin(Buffer buffer, AllocPattern pattern) | ||
| 1018 | { | ||
| 1019 | Pool pool; | ||
| 1020 | |||
| 1021 | AVERT(Buffer, buffer); | ||
| 1022 | AVERT(AllocPattern, pattern); | ||
| 1023 | |||
| 1024 | ++buffer->rampCount; | ||
| 1025 | AVER(buffer->rampCount > 0); | ||
| 1026 | |||
| 1027 | pool = BufferPool(buffer); | ||
| 1028 | AVERT(Pool, pool); | ||
| 1029 | (*pool->class->rampBegin)(pool, buffer, | ||
| 1030 | pattern == &AllocPatternRampCollectAllStruct); | ||
| 1031 | } | ||
| 1032 | |||
| 1033 | |||
| 1034 | /* BufferRampEnd -- note an exit from a ramp pattern */ | ||
| 1035 | |||
| 1036 | Res BufferRampEnd(Buffer buffer) | ||
| 1037 | { | ||
| 1038 | Pool pool; | ||
| 1039 | |||
| 1040 | AVERT(Buffer, buffer); | ||
| 1041 | |||
| 1042 | if (buffer->rampCount == 0) | ||
| 1043 | return ResFAIL; | ||
| 1044 | --buffer->rampCount; | ||
| 1045 | |||
| 1046 | pool = BufferPool(buffer); | ||
| 1047 | AVERT(Pool, pool); | ||
| 1048 | (*pool->class->rampEnd)(pool, buffer); | ||
| 1049 | return ResOK; | ||
| 1050 | } | ||
| 1051 | |||
| 1052 | |||
| 1053 | /* BufferRampReset -- exit from ramp mode */ | ||
| 1054 | |||
| 1055 | void BufferRampReset(Buffer buffer) | ||
| 1056 | { | ||
| 1057 | Pool pool; | ||
| 1058 | |||
| 1059 | AVERT(Buffer, buffer); | ||
| 1060 | |||
| 1061 | if (buffer->rampCount == 0) | ||
| 1062 | return; | ||
| 1063 | |||
| 1064 | pool = BufferPool(buffer); | ||
| 1065 | AVERT(Pool, pool); | ||
| 1066 | do | ||
| 1067 | (*pool->class->rampEnd)(pool, buffer); | ||
| 1068 | while(--buffer->rampCount > 0); | ||
| 1069 | } | ||
| 1070 | |||
| 1071 | |||
| 1072 | |||
| 1073 | /* BufferClass -- support for the basic Buffer class */ | ||
| 1074 | |||
| 1075 | |||
| 1076 | /* bufferTrivInit -- basic buffer init method */ | ||
| 1077 | |||
| 1078 | static Res bufferTrivInit (Buffer buffer, Pool pool, va_list args) | ||
| 1079 | { | ||
| 1080 | /* initialization happens in BufferInitV so checks are safe */ | ||
| 1081 | AVERT(Buffer, buffer); | ||
| 1082 | AVERT(Pool, pool); | ||
| 1083 | UNUSED(args); | ||
| 1084 | EVENT_PPU(BufferInit, buffer, pool, buffer->isMutator); | ||
| 1085 | return ResOK; | ||
| 1086 | } | ||
| 1087 | |||
| 1088 | |||
| 1089 | /* bufferTrivFinish -- basic buffer finish method */ | ||
| 1090 | |||
| 1091 | static void bufferTrivFinish (Buffer buffer) | ||
| 1092 | { | ||
| 1093 | /* No special finish for simple buffers */ | ||
| 1094 | AVERT(Buffer, buffer); | ||
| 1095 | AVER(BufferIsReset(buffer)); | ||
| 1096 | NOOP; | ||
| 1097 | } | ||
| 1098 | |||
| 1099 | |||
| 1100 | /* bufferTrivAttach -- basic buffer attach method */ | ||
| 1101 | |||
| 1102 | static void bufferTrivAttach(Buffer buffer, Addr base, Addr limit, | ||
| 1103 | Addr init, Size size) | ||
| 1104 | { | ||
| 1105 | /* No special attach method for simple buffers */ | ||
| 1106 | AVERT(Buffer, buffer); | ||
| 1107 | /* Other parameters are consistency checked in BufferAttach */ | ||
| 1108 | UNUSED(base); | ||
| 1109 | UNUSED(limit); | ||
| 1110 | UNUSED(init); | ||
| 1111 | UNUSED(size); | ||
| 1112 | NOOP; | ||
| 1113 | } | ||
| 1114 | |||
| 1115 | |||
/* bufferTrivDetach -- basic buffer detach method
 *
 * Simple buffers track no attachment state, so there is nothing
 * to undo on detach. */

static void bufferTrivDetach(Buffer buffer)
{
  AVERT(Buffer, buffer);
  NOOP;
}
| 1124 | |||
| 1125 | |||
/* bufferNoSeg -- basic buffer BufferSeg accessor method
 *
 * .noseg: basic buffers don't support segments, so this method should
 * not be called. */

static Seg bufferNoSeg (Buffer buffer)
{
  AVERT(Buffer, buffer);
  NOTREACHED; /* .noseg */
  /* Unreachable; keeps the compiler happy about the return value. */
  return NULL;
}
| 1137 | |||
| 1138 | |||
| 1139 | |||
/* bufferTrivRankSet -- basic BufferRankSet accessor method */

static RankSet bufferTrivRankSet (Buffer buffer)
{
  AVERT(Buffer, buffer);
  /* vanilla buffers can only have the empty rank set */
  return RankSetEMPTY;
}
| 1148 | |||
| 1149 | |||
/* bufferNoSetRankSet -- basic BufferSetRankSet setter method
 *
 * .norank: basic buffers don't support rank sets, so this method
 * should not be called. */

static void bufferNoSetRankSet (Buffer buffer, RankSet rankset)
{
  AVERT(Buffer, buffer);
  AVERT(RankSet, rankset);
  NOTREACHED; /* .norank */
}
| 1161 | |||
| 1162 | |||
/* bufferNoReassignSeg -- basic BufferReassignSeg method
 *
 * .noseg: basic buffers don't support attachment to segments, so this
 * method should not be called. */

static void bufferNoReassignSeg (Buffer buffer, Seg seg)
{
  AVERT(Buffer, buffer);
  AVERT(Seg, seg);
  NOTREACHED; /* .noseg */
}
| 1174 | |||
| 1175 | |||
| 1176 | /* bufferTrivDescribe -- basic Buffer describe method */ | ||
| 1177 | |||
| 1178 | static Res bufferTrivDescribe(Buffer buffer, mps_lib_FILE *stream) | ||
| 1179 | { | ||
| 1180 | if (!CHECKT(Buffer, buffer)) return ResFAIL; | ||
| 1181 | if (stream == NULL) return ResFAIL; | ||
| 1182 | /* dispatching function does it all */ | ||
| 1183 | return ResOK; | ||
| 1184 | } | ||
| 1185 | |||
| 1186 | |||
/* BufferClassCheck -- check the consistency of a BufferClass */

Bool BufferClassCheck(BufferClass class)
{
  CHECKL(ProtocolClassCheck(&class->protocol));
  CHECKL(class->name != NULL); /* Should be <=6 char C identifier */
  /* Instances must be at least as large as the base structure. */
  CHECKL(class->size >= sizeof(BufferStruct));
  /* Every method slot must hold a plausible function pointer. */
  CHECKL(FUNCHECK(class->init));
  CHECKL(FUNCHECK(class->finish));
  CHECKL(FUNCHECK(class->attach));
  CHECKL(FUNCHECK(class->detach));
  CHECKL(FUNCHECK(class->seg));
  CHECKL(FUNCHECK(class->rankSet));
  CHECKL(FUNCHECK(class->setRankSet));
  CHECKL(FUNCHECK(class->reassignSeg));
  CHECKL(FUNCHECK(class->describe));
  CHECKS(BufferClass, class);
  return TRUE;
}
| 1206 | |||
| 1207 | |||
/* BufferClass -- the vanilla buffer class definition
 *
 * The concrete class for basic (segmentless) buffers.  Methods that
 * require a segment (seg, setRankSet, reassignSeg) are bound to the
 * NOTREACHED "No" implementations; the rest are the trivial methods.
 *
 * See design.mps.buffer.class.hierarchy.buffer. */

DEFINE_CLASS(BufferClass, class)
{
  INHERIT_CLASS(&class->protocol, ProtocolClass);
  class->name = "BUFFER";
  class->size = sizeof(BufferStruct);
  class->init = bufferTrivInit;
  class->finish = bufferTrivFinish;
  class->attach = bufferTrivAttach;
  class->detach = bufferTrivDetach;
  class->describe = bufferTrivDescribe;
  class->seg = bufferNoSeg;
  class->rankSet = bufferTrivRankSet;
  class->setRankSet = bufferNoSetRankSet;
  class->reassignSeg = bufferNoReassignSeg;
  class->sig = BufferClassSig;
}
| 1228 | |||
| 1229 | |||
| 1230 | |||
| 1231 | /* SegBufClass -- support for the SegBuf subclass */ | ||
| 1232 | |||
| 1233 | |||
/* BufferSegBuf -- convert generic Buffer to a SegBuf
 *
 * Downcast by cast: a SegBuf embeds its BufferStruct (see
 * SegBufCheck), so the two addresses coincide. */

#define BufferSegBuf(buffer) ((SegBuf)(buffer))
| 1237 | |||
| 1238 | |||
/* SegBufCheck -- check consistency of a SegBuf
 *
 * The seg field must agree with the buffer's mode: NULL when
 * detached, a valid segment when attached, unconstrained while in
 * transition between the two. */

Bool SegBufCheck(SegBuf segbuf)
{
  Buffer buffer;

  CHECKS(SegBuf, segbuf);
  buffer = &segbuf->bufferStruct;
  CHECKL(BufferCheck(buffer));
  CHECKL(RankSetCheck(segbuf->rankSet));

  if (buffer->mode & BufferModeTRANSITION) {
    /* Attach/detach in progress: nothing to check. */
  } else if ((buffer->mode & BufferModeATTACHED) == 0) {
    CHECKL(segbuf->seg == NULL);
  } else {
    /* The buffer is attached to a segment. */
    CHECKL(segbuf->seg != NULL);
    CHECKL(SegCheck(segbuf->seg));
    /* To avoid recursive checking, leave it to SegCheck to make */
    /* sure the buffer and segment fields tally. */

    if (buffer->mode & BufferModeFLIPPED) {
      /* Only buffers that allocate pointers get flipped. */
      CHECKL(segbuf->rankSet != RankSetEMPTY);
    }
  }

  return TRUE;
}
| 1269 | |||
| 1270 | |||
| 1271 | /* segBufInit -- SegBuf init method */ | ||
| 1272 | |||
| 1273 | static Res segBufInit (Buffer buffer, Pool pool, va_list args) | ||
| 1274 | { | ||
| 1275 | BufferClass super; | ||
| 1276 | SegBuf segbuf; | ||
| 1277 | Res res; | ||
| 1278 | |||
| 1279 | AVERT(Buffer, buffer); | ||
| 1280 | AVERT(Pool, pool); | ||
| 1281 | segbuf = BufferSegBuf(buffer); | ||
| 1282 | |||
| 1283 | /* Initialize the superclass fields first via next-method call */ | ||
| 1284 | super = BUFFER_SUPERCLASS(SegBufClass); | ||
| 1285 | res = super->init(buffer, pool, args); | ||
| 1286 | if (res != ResOK) | ||
| 1287 | return res; | ||
| 1288 | |||
| 1289 | segbuf->seg = NULL; | ||
| 1290 | segbuf->sig = SegBufSig; | ||
| 1291 | segbuf->rankSet = RankSetEMPTY; | ||
| 1292 | |||
| 1293 | AVERT(SegBuf, segbuf); | ||
| 1294 | EVENT_PPU(BufferInitSeg, buffer, pool, buffer->isMutator); | ||
| 1295 | return ResOK; | ||
| 1296 | } | ||
| 1297 | |||
| 1298 | |||
| 1299 | /* segBufFinish -- SegBuf finish method */ | ||
| 1300 | |||
| 1301 | static void segBufFinish (Buffer buffer) | ||
| 1302 | { | ||
| 1303 | BufferClass super; | ||
| 1304 | SegBuf segbuf; | ||
| 1305 | |||
| 1306 | AVERT(Buffer, buffer); | ||
| 1307 | AVER(BufferIsReset(buffer)); | ||
| 1308 | segbuf = BufferSegBuf(buffer); | ||
| 1309 | AVERT(SegBuf, segbuf); | ||
| 1310 | |||
| 1311 | segbuf->sig = SigInvalid; | ||
| 1312 | |||
| 1313 | /* finish the superclass fields last */ | ||
| 1314 | super = BUFFER_SUPERCLASS(SegBufClass); | ||
| 1315 | super->finish(buffer); | ||
| 1316 | } | ||
| 1317 | |||
| 1318 | |||
/* segBufAttach -- SegBuf attach method
 *
 * Looks up the segment containing "base" and associates the buffer
 * with it in both directions.  The region must lie within a single
 * segment that has no buffer attached yet. */

static void segBufAttach(Buffer buffer, Addr base, Addr limit,
                         Addr init, Size size)
{
  SegBuf segbuf;
  Seg seg;
  Arena arena;
  Bool found;

  AVERT(Buffer, buffer);
  /* Other parameters are consistency checked in BufferAttach */
  UNUSED(init);
  UNUSED(size);

  segbuf = BufferSegBuf(buffer);
  arena = BufferArena(buffer);
  /* The base address must lie in some segment of the arena. */
  found = SegOfAddr(&seg, arena, base);
  AVER(found);
  /* Neither side may already be attached. */
  AVER(segbuf->seg == NULL);
  AVER(SegBuffer(seg) == NULL);
  /* The buffered region must fit within the segment. */
  AVER(SegBase(seg) <= base);
  AVER(limit <= SegLimit(seg));

  /* attach the buffer to the segment, in both directions */
  SegSetBuffer(seg, buffer);
  segbuf->seg = seg;

  AVERT(SegBuf, segbuf);
}
| 1349 | |||
| 1350 | |||
| 1351 | /* segBufDetach -- SegBuf detach method */ | ||
| 1352 | |||
| 1353 | static void segBufDetach(Buffer buffer) | ||
| 1354 | { | ||
| 1355 | SegBuf segbuf; | ||
| 1356 | Seg seg; | ||
| 1357 | |||
| 1358 | AVERT(Buffer, buffer); | ||
| 1359 | segbuf = BufferSegBuf(buffer); | ||
| 1360 | AVERT(SegBuf, segbuf); | ||
| 1361 | |||
| 1362 | seg = segbuf->seg; | ||
| 1363 | AVER(seg != NULL); | ||
| 1364 | SegSetBuffer(seg, NULL); | ||
| 1365 | segbuf->seg = NULL; | ||
| 1366 | } | ||
| 1367 | |||
| 1368 | |||
| 1369 | /* segBufSeg -- BufferSeg accessor method for SegBuf instances */ | ||
| 1370 | |||
| 1371 | static Seg segBufSeg (Buffer buffer) | ||
| 1372 | { | ||
| 1373 | SegBuf segbuf; | ||
| 1374 | |||
| 1375 | AVERT(Buffer, buffer); | ||
| 1376 | segbuf = BufferSegBuf(buffer); | ||
| 1377 | AVERT(SegBuf, segbuf); | ||
| 1378 | return segbuf->seg; | ||
| 1379 | } | ||
| 1380 | |||
| 1381 | |||
| 1382 | /* segBufRankSet -- BufferRankSet accessor for SegBuf instances */ | ||
| 1383 | |||
| 1384 | static RankSet segBufRankSet (Buffer buffer) | ||
| 1385 | { | ||
| 1386 | SegBuf segbuf; | ||
| 1387 | |||
| 1388 | AVERT(Buffer, buffer); | ||
| 1389 | segbuf = BufferSegBuf(buffer); | ||
| 1390 | AVERT(SegBuf, segbuf); | ||
| 1391 | return segbuf->rankSet; | ||
| 1392 | } | ||
| 1393 | |||
| 1394 | |||
| 1395 | /* segBufSetRankSet -- BufferSetRankSet setter method for SegBuf */ | ||
| 1396 | |||
| 1397 | static void segBufSetRankSet (Buffer buffer, RankSet rankset) | ||
| 1398 | { | ||
| 1399 | SegBuf segbuf; | ||
| 1400 | |||
| 1401 | AVERT(Buffer, buffer); | ||
| 1402 | AVERT(RankSet, rankset); | ||
| 1403 | segbuf = BufferSegBuf(buffer); | ||
| 1404 | AVERT(SegBuf, segbuf); | ||
| 1405 | segbuf->rankSet = rankset; | ||
| 1406 | } | ||
| 1407 | |||
| 1408 | |||
| 1409 | /* segBufReassignSeg -- BufferReassignSeg method for SegBuf | ||
| 1410 | * | ||
| 1411 | * Used to support segment merging and splitting. | ||
| 1412 | * | ||
| 1413 | * .invseg: On entry the buffer is attached to an invalid segment, which | ||
| 1414 | * can't be checked. The method is called to make the attachment valid. */ | ||
| 1415 | |||
| 1416 | static void segBufReassignSeg (Buffer buffer, Seg seg) | ||
| 1417 | { | ||
| 1418 | SegBuf segbuf; | ||
| 1419 | |||
| 1420 | AVERT(Buffer, buffer); | ||
| 1421 | AVERT(Seg, seg); | ||
| 1422 | segbuf = BufferSegBuf(buffer); | ||
| 1423 | /* Can't check segbuf on entry. See .invseg */ | ||
| 1424 | AVER(NULL != segbuf->seg); | ||
| 1425 | AVER(seg != segbuf->seg); | ||
| 1426 | segbuf->seg = seg; | ||
| 1427 | AVERT(SegBuf, segbuf); | ||
| 1428 | } | ||
| 1429 | |||
| 1430 | |||
| 1431 | /* segBufDescribe -- describe method for SegBuf */ | ||
| 1432 | |||
| 1433 | static Res segBufDescribe(Buffer buffer, mps_lib_FILE *stream) | ||
| 1434 | { | ||
| 1435 | SegBuf segbuf; | ||
| 1436 | BufferClass super; | ||
| 1437 | Res res; | ||
| 1438 | |||
| 1439 | if (!CHECKT(Buffer, buffer)) return ResFAIL; | ||
| 1440 | if (stream == NULL) return ResFAIL; | ||
| 1441 | segbuf = BufferSegBuf(buffer); | ||
| 1442 | if (!CHECKT(SegBuf, segbuf)) return ResFAIL; | ||
| 1443 | |||
| 1444 | /* Describe the superclass fields first via next-method call */ | ||
| 1445 | super = BUFFER_SUPERCLASS(SegBufClass); | ||
| 1446 | res = super->describe(buffer, stream); | ||
| 1447 | if (res != ResOK) return res; | ||
| 1448 | |||
| 1449 | res = WriteF(stream, | ||
| 1450 | " Seg $P\n", (WriteFP)segbuf->seg, | ||
| 1451 | " rankSet $U\n", (WriteFU)segbuf->rankSet, | ||
| 1452 | NULL); | ||
| 1453 | |||
| 1454 | return res; | ||
| 1455 | } | ||
| 1456 | |||
| 1457 | |||
/* SegBufClass -- SegBuf class definition
 *
 * Supports an association with a single segment when attached.  See
 * design.mps.buffer.class.hierarchy.segbuf. */

/* SegBufClass shares the generic buffer class structure; only the
 * method bindings and instance size differ. */
typedef BufferClassStruct SegBufClassStruct;

DEFINE_CLASS(SegBufClass, class)
{
  INHERIT_CLASS(class, BufferClass);
  class->name = "SEGBUF";
  class->size = sizeof(SegBufStruct);
  class->init = segBufInit;
  class->finish = segBufFinish;
  class->attach = segBufAttach;
  class->detach = segBufDetach;
  class->describe = segBufDescribe;
  class->seg = segBufSeg;
  class->rankSet = segBufRankSet;
  class->setRankSet = segBufSetRankSet;
  class->reassignSeg = segBufReassignSeg;
}
| 1480 | |||
| 1481 | |||
/* RankBufClass -- support for the RankBufClass subclass */


/* rankBufInit -- RankBufClass init method
 *
 * Consumes one Rank from the variable argument list (this must happen
 * before the next-method call advances nothing further), initializes
 * via the superclass, and records the rank as a singleton rank set. */

static Res rankBufInit (Buffer buffer, Pool pool, va_list args)
{
  /* Assumes pun compatibility between Rank and mps_rank_t */
  /* Which is checked by mpsi_check in impl.c.mpsi */
  Rank rank = va_arg(args, Rank);
  BufferClass super;
  Res res;

  AVERT(Buffer, buffer);
  AVERT(Pool, pool);
  AVER(RankCheck(rank));

  /* Initialize the superclass fields first via next-method call */
  super = BUFFER_SUPERCLASS(RankBufClass);
  res = super->init(buffer, pool, args);
  if (res != ResOK)
    return res;

  BufferSetRankSet(buffer, RankSetSingle(rank));

  /* There's nothing to check that the superclass doesn't, so no AVERT. */
  EVENT_PPUU(BufferInitRank, buffer, pool, buffer->isMutator, rank);
  return ResOK;
}
| 1511 | |||
| 1512 | |||
| 1513 | /* RankBufClass -- RankBufClass class definition | ||
| 1514 | * | ||
| 1515 | * A subclass of SegBufClass, sharing structure for instances. | ||
| 1516 | * | ||
| 1517 | * Supports initialization to a rank supplied at creation time. */ | ||
| 1518 | |||
| 1519 | typedef BufferClassStruct RankBufClassStruct; | ||
| 1520 | |||
| 1521 | DEFINE_CLASS(RankBufClass, class) | ||
| 1522 | { | ||
| 1523 | INHERIT_CLASS(class, SegBufClass); | ||
| 1524 | class->name = "RANKBUF"; | ||
| 1525 | class->init = rankBufInit; | ||
| 1526 | } | ||