Mercurial
comparison third_party/luajit/src/lj_gc.c @ 178:94705b5986b3
[ThirdParty] Added WRK and luajit for load testing.
| author | MrJuneJune <me@mrjunejune.com> |
|---|---|
| date | Thu, 22 Jan 2026 20:10:30 -0800 |
| parents | |
| children |
comparison
equal
deleted
inserted
replaced
| 177:24fe8ff94056 | 178:94705b5986b3 |
|---|---|
| 1 /* | |
| 2 ** Garbage collector. | |
| 3 ** Copyright (C) 2005-2023 Mike Pall. See Copyright Notice in luajit.h | |
| 4 ** | |
| 5 ** Major portions taken verbatim or adapted from the Lua interpreter. | |
| 6 ** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h | |
| 7 */ | |
| 8 | |
| 9 #define lj_gc_c | |
| 10 #define LUA_CORE | |
| 11 | |
| 12 #include "lj_obj.h" | |
| 13 #include "lj_gc.h" | |
| 14 #include "lj_err.h" | |
| 15 #include "lj_buf.h" | |
| 16 #include "lj_str.h" | |
| 17 #include "lj_tab.h" | |
| 18 #include "lj_func.h" | |
| 19 #include "lj_udata.h" | |
| 20 #include "lj_meta.h" | |
| 21 #include "lj_state.h" | |
| 22 #include "lj_frame.h" | |
| 23 #if LJ_HASFFI | |
| 24 #include "lj_ctype.h" | |
| 25 #include "lj_cdata.h" | |
| 26 #endif | |
| 27 #include "lj_trace.h" | |
| 28 #include "lj_dispatch.h" | |
| 29 #include "lj_vm.h" | |
| 30 #include "lj_vmevent.h" | |
| 31 | |
| 32 #define GCSTEPSIZE 1024u /* Granularity of one incremental GC step. */ | |
| 33 #define GCSWEEPMAX 40 /* Max. objects swept per sweep step. */ | |
| 34 #define GCSWEEPCOST 10 /* Cost unit charged per swept chain/object. */ | |
| 35 #define GCFINALIZECOST 100 /* Cost unit charged per finalization. */ | |
| 36 | |
| 37 /* Macros to set GCobj colors and flags. */ | |
| 38 #define white2gray(x) ((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES) | |
| 39 #define gray2black(x) ((x)->gch.marked |= LJ_GC_BLACK) | |
| 40 #define isfinalized(u) ((u)->marked & LJ_GC_FINALIZED) | |
| 41 | |
| 42 /* -- Mark phase ---------------------------------------------------------- */ | |
| 43 | |
| 44 /* Mark a TValue (if needed). */ | |
| 45 #define gc_marktv(g, tv) \ | |
| 46 { lj_assertG(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct), \ | |
| 47 "TValue and GC type mismatch"); \ | |
| 48 if (tviswhite(tv)) gc_mark(g, gcV(tv)); } | |
| 49 | |
| 50 /* Mark a GCobj (if needed). */ | |
| 51 #define gc_markobj(g, o) \ | |
| 52 { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); } | |
| 53 | |
| 54 /* Mark a string object. */ | |
| 55 #define gc_mark_str(s) ((s)->marked &= (uint8_t)~LJ_GC_WHITES) | |
| 56 | |
| 57 /* Mark a white GCobj. */ | |
| 58 static void gc_mark(global_State *g, GCobj *o) | |
| 59 { | |
| 60 int gct = o->gch.gct; /* GC object type tag. */ | |
| 61 lj_assertG(iswhite(o), "mark of non-white object"); | |
| 62 lj_assertG(!isdead(g, o), "mark of dead object"); | |
| 63 white2gray(o); | |
| 64 if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) { | |
| 65 GCtab *mt = tabref(gco2ud(o)->metatable); | |
| 66 gray2black(o); /* Userdata are never gray. */ | |
| 67 if (mt) gc_markobj(g, mt); | |
| 68 gc_markobj(g, tabref(gco2ud(o)->env)); | |
| 69 if (LJ_HASBUFFER && gco2ud(o)->udtype == UDTYPE_BUFFER) { | |
| 70 SBufExt *sbx = (SBufExt *)uddata(gco2ud(o)); /* Mark buffer refs, too. */ | |
| 71 if (sbufiscow(sbx) && gcref(sbx->cowref)) | |
| 72 gc_markobj(g, gcref(sbx->cowref)); | |
| 73 if (gcref(sbx->dict_str)) | |
| 74 gc_markobj(g, gcref(sbx->dict_str)); | |
| 75 if (gcref(sbx->dict_mt)) | |
| 76 gc_markobj(g, gcref(sbx->dict_mt)); | |
| 77 } | |
| 78 } else if (LJ_UNLIKELY(gct == ~LJ_TUPVAL)) { | |
| 79 GCupval *uv = gco2uv(o); | |
| 80 gc_marktv(g, uvval(uv)); | |
| 81 if (uv->closed) | |
| 82 gray2black(o); /* Closed upvalues are never gray. */ | |
| 83 } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) { | |
| 84 lj_assertG(gct == ~LJ_TFUNC || gct == ~LJ_TTAB || | |
| 85 gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO || gct == ~LJ_TTRACE, | |
| 86 "bad GC type %d", gct); | |
| 87 setgcrefr(o->gch.gclist, g->gc.gray); /* Push onto the gray list. */ | |
| 88 setgcref(g->gc.gray, o); | |
| 89 } | |
| 90 } | |
| 91 | |
| 92 /* Mark GC roots. */ | |
| 93 static void gc_mark_gcroot(global_State *g) | |
| 94 { | |
| 95 ptrdiff_t i; | |
| 96 for (i = 0; i < GCROOT_MAX; i++) | |
| 97 if (gcref(g->gcroot[i]) != NULL) | |
| 98 gc_markobj(g, gcref(g->gcroot[i])); /* Mark each non-NULL root slot. */ | |
| 99 } | |
| 100 | |
| 101 /* Start a GC cycle and mark the root set. */ | |
| 102 static void gc_mark_start(global_State *g) | |
| 103 { | |
| 104 setgcrefnull(g->gc.gray); /* Reset the pending-propagation lists. */ | |
| 105 setgcrefnull(g->gc.grayagain); | |
| 106 setgcrefnull(g->gc.weak); | |
| 107 gc_markobj(g, mainthread(g)); /* Roots: main thread, its env, registry. */ | |
| 108 gc_markobj(g, tabref(mainthread(g)->env)); | |
| 109 gc_marktv(g, &g->registrytv); | |
| 110 gc_mark_gcroot(g); | |
| 111 g->gc.state = GCSpropagate; | |
| 112 } | |
| 113 | |
| 114 /* Mark open upvalues. */ | |
| 115 static void gc_mark_uv(global_State *g) | |
| 116 { | |
| 117 GCupval *uv; | |
| 118 for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) { | |
| 119 lj_assertG(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv, | |
| 120 "broken upvalue chain"); | |
| 121 if (isgray(obj2gco(uv))) | |
| 122 gc_marktv(g, uvval(uv)); /* Remark the value of each gray upvalue. */ | |
| 123 } | |
| 124 } | |
| 125 | |
| 126 /* Mark userdata in mmudata list. */ | |
| 127 static void gc_mark_mmudata(global_State *g) | |
| 128 { | |
| 129 GCobj *root = gcref(g->gc.mmudata); | |
| 130 GCobj *u = root; | |
| 131 if (u) { | |
| 132 do { /* The mmudata list is circular. */ | |
| 133 u = gcnext(u); | |
| 134 makewhite(g, u); /* Could be from previous GC. */ | |
| 135 gc_mark(g, u); | |
| 136 } while (u != root); | |
| 137 } | |
| 138 } | |
| 139 | |
| 140 /* Separate userdata objects to be finalized to mmudata list. */ | |
| 141 size_t lj_gc_separateudata(global_State *g, int all) | |
| 142 { | |
| 143 size_t m = 0; /* Total size of userdata needing finalization. */ | |
| 144 GCRef *p = &mainthread(g)->nextgc; | |
| 145 GCobj *o; | |
| 146 while ((o = gcref(*p)) != NULL) { | |
| 147 if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) { | |
| 148 p = &o->gch.nextgc; /* Nothing to do. */ | |
| 149 } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) { | |
| 150 markfinalized(o); /* Done, as there's no __gc metamethod. */ | |
| 151 p = &o->gch.nextgc; | |
| 152 } else { /* Otherwise move userdata to be finalized to mmudata list. */ | |
| 153 m += sizeudata(gco2ud(o)); | |
| 154 markfinalized(o); | |
| 155 *p = o->gch.nextgc; /* Unlink from the main userdata list. */ | |
| 156 if (gcref(g->gc.mmudata)) { /* Link to end of mmudata list. */ | |
| 157 GCobj *root = gcref(g->gc.mmudata); | |
| 158 setgcrefr(o->gch.nextgc, root->gch.nextgc); | |
| 159 setgcref(root->gch.nextgc, o); | |
| 160 setgcref(g->gc.mmudata, o); | |
| 161 } else { /* Create circular list. */ | |
| 162 setgcref(o->gch.nextgc, o); | |
| 163 setgcref(g->gc.mmudata, o); | |
| 164 } | |
| 165 } | |
| 166 } | |
| 167 return m; | |
| 168 } | |
| 169 | |
| 170 /* -- Propagation phase --------------------------------------------------- */ | |
| 171 | |
| 172 /* Traverse a table. */ | |
| 173 static int gc_traverse_tab(global_State *g, GCtab *t) | |
| 174 { | |
| 175 int weak = 0; /* Bitmask of LJ_GC_WEAKKEY/LJ_GC_WEAKVAL. */ | |
| 176 cTValue *mode; | |
| 177 GCtab *mt = tabref(t->metatable); | |
| 178 if (mt) | |
| 179 gc_markobj(g, mt); | |
| 180 mode = lj_meta_fastg(g, mt, MM_mode); | |
| 181 if (mode && tvisstr(mode)) { /* Valid __mode field? */ | |
| 182 const char *modestr = strVdata(mode); | |
| 183 int c; | |
| 184 while ((c = *modestr++)) { | |
| 185 if (c == 'k') weak |= LJ_GC_WEAKKEY; | |
| 186 else if (c == 'v') weak |= LJ_GC_WEAKVAL; | |
| 187 } | |
| 188 if (weak) { /* Weak tables are cleared in the atomic phase. */ | |
| 189 #if LJ_HASFFI | |
| 190 CTState *cts = ctype_ctsG(g); | |
| 191 if (cts && cts->finalizer == t) { /* Don't weaken FFI finalizer values. */ | |
| 192 weak = (int)(~0u & ~LJ_GC_WEAKVAL); | |
| 193 } else | |
| 194 #endif | |
| 195 { | |
| 196 t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak); | |
| 197 setgcrefr(t->gclist, g->gc.weak); /* Queue on the weak-table list. */ | |
| 198 setgcref(g->gc.weak, obj2gco(t)); | |
| 199 } | |
| 200 } | |
| 201 } | |
| 202 if (weak == LJ_GC_WEAK) /* Nothing to mark if both keys/values are weak. */ | |
| 203 return 1; | |
| 204 if (!(weak & LJ_GC_WEAKVAL)) { /* Mark array part. */ | |
| 205 MSize i, asize = t->asize; | |
| 206 for (i = 0; i < asize; i++) | |
| 207 gc_marktv(g, arrayslot(t, i)); | |
| 208 } | |
| 209 if (t->hmask > 0) { /* Mark hash part. */ | |
| 210 Node *node = noderef(t->node); | |
| 211 MSize i, hmask = t->hmask; | |
| 212 for (i = 0; i <= hmask; i++) { | |
| 213 Node *n = &node[i]; | |
| 214 if (!tvisnil(&n->val)) { /* Mark non-empty slot. */ | |
| 215 lj_assertG(!tvisnil(&n->key), "mark of nil key in non-empty slot"); | |
| 216 if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key); | |
| 217 if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val); | |
| 218 } | |
| 219 } | |
| 220 } | |
| 221 return weak; /* Nonzero keeps the table gray (cleared in atomic phase). */ | |
| 222 } | |
| 223 | |
| 224 /* Traverse a function. */ | |
| 225 static void gc_traverse_func(global_State *g, GCfunc *fn) | |
| 226 { | |
| 227 gc_markobj(g, tabref(fn->c.env)); /* Mark the environment table. */ | |
| 228 if (isluafunc(fn)) { | |
| 229 uint32_t i; | |
| 230 lj_assertG(fn->l.nupvalues <= funcproto(fn)->sizeuv, | |
| 231 "function upvalues out of range"); | |
| 232 gc_markobj(g, funcproto(fn)); /* Keep the prototype alive. */ | |
| 233 for (i = 0; i < fn->l.nupvalues; i++) /* Mark Lua function upvalues. */ | |
| 234 gc_markobj(g, &gcref(fn->l.uvptr[i])->uv); | |
| 235 } else { | |
| 236 uint32_t i; | |
| 237 for (i = 0; i < fn->c.nupvalues; i++) /* Mark C function upvalues. */ | |
| 238 gc_marktv(g, &fn->c.upvalue[i]); | |
| 239 } | |
| 240 } | |
| 241 | |
| 242 #if LJ_HASJIT | |
| 243 /* Mark a trace. */ | |
| 244 static void gc_marktrace(global_State *g, TraceNo traceno) | |
| 245 { | |
| 246 GCobj *o = obj2gco(traceref(G2J(g), traceno)); | |
| 247 lj_assertG(traceno != G2J(g)->cur.traceno, "active trace escaped"); | |
| 248 if (iswhite(o)) { | |
| 249 white2gray(o); | |
| 250 setgcrefr(o->gch.gclist, g->gc.gray); /* Push onto the gray list. */ | |
| 251 setgcref(g->gc.gray, o); | |
| 252 } | |
| 253 } | |
| 254 | |
| 255 /* Traverse a trace. */ | |
| 256 static void gc_traverse_trace(global_State *g, GCtrace *T) | |
| 257 { | |
| 258 IRRef ref; | |
| 259 if (T->traceno == 0) return; /* Nothing to do for an unassigned trace. */ | |
| 260 for (ref = T->nk; ref < REF_TRUE; ref++) { /* Scan the IR constants. */ | |
| 261 IRIns *ir = &T->ir[ref]; | |
| 262 if (ir->o == IR_KGC) | |
| 263 gc_markobj(g, ir_kgc(ir)); | |
| 264 if (irt_is64(ir->t) && ir->o != IR_KNULL) | |
| 265 ref++; /* 64 bit constants occupy two IR slots. */ | |
| 266 } | |
| 267 if (T->link) gc_marktrace(g, T->link); /* Mark linked traces. */ | |
| 268 if (T->nextroot) gc_marktrace(g, T->nextroot); | |
| 269 if (T->nextside) gc_marktrace(g, T->nextside); | |
| 270 gc_markobj(g, gcref(T->startpt)); | |
| 271 } | |
| 272 | |
| 273 /* The current trace is a GC root while not anchored in the prototype (yet). */ | |
| 274 #define gc_traverse_curtrace(g) gc_traverse_trace(g, &G2J(g)->cur) | |
| 275 #else | |
| 276 #define gc_traverse_curtrace(g) UNUSED(g) | |
| 277 #endif | |
| 278 | |
| 279 /* Traverse a prototype. */ | |
| 280 static void gc_traverse_proto(global_State *g, GCproto *pt) | |
| 281 { | |
| 282 ptrdiff_t i; | |
| 283 gc_mark_str(proto_chunkname(pt)); /* Keep the chunk name string alive. */ | |
| 284 for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++) /* Mark collectable consts. */ | |
| 285 gc_markobj(g, proto_kgc(pt, i)); | |
| 286 #if LJ_HASJIT | |
| 287 if (pt->trace) gc_marktrace(g, pt->trace); /* Mark attached trace. */ | |
| 288 #endif | |
| 289 } | |
| 290 | |
| 291 /* Traverse the frame structure of a stack. */ | |
| 292 static MSize gc_traverse_frames(global_State *g, lua_State *th) | |
| 293 { | |
| 294 TValue *frame, *top = th->top-1, *bot = tvref(th->stack); | |
| 295 /* Note: extra vararg frame not skipped, marks function twice (harmless). */ | |
| 296 for (frame = th->base-1; frame > bot+LJ_FR2; frame = frame_prev(frame)) { | |
| 297 GCfunc *fn = frame_func(frame); | |
| 298 TValue *ftop = frame; | |
| 299 if (isluafunc(fn)) ftop += funcproto(fn)->framesize; | |
| 300 if (ftop > top) top = ftop; /* Track the highest frame top seen. */ | |
| 301 if (!LJ_FR2) gc_markobj(g, fn); /* Need to mark hidden function (or L). */ | |
| 302 } | |
| 303 top++; /* Correct bias of -1 (frame == base-1). */ | |
| 304 if (top > tvref(th->maxstack)) top = tvref(th->maxstack); /* Clamp. */ | |
| 305 return (MSize)(top - bot); /* Return minimum needed stack size. */ | |
| 306 } | |
| 307 | |
| 308 /* Traverse a thread object. */ | |
| 309 static void gc_traverse_thread(global_State *g, lua_State *th) | |
| 310 { | |
| 311 TValue *o, *top = th->top; | |
| 312 for (o = tvref(th->stack)+1+LJ_FR2; o < top; o++) /* Mark live slots. */ | |
| 313 gc_marktv(g, o); | |
| 314 if (g->gc.state == GCSatomic) { | |
| 315 top = tvref(th->stack) + th->stacksize; | |
| 316 for (; o < top; o++) /* Clear unmarked slots. */ | |
| 317 setnilV(o); | |
| 318 } | |
| 319 gc_markobj(g, tabref(th->env)); | |
| 320 lj_state_shrinkstack(th, gc_traverse_frames(g, th)); | |
| 321 } | |
| 322 | |
| 323 /* Propagate one gray object. Traverse it and turn it black. */ | |
| 324 static size_t propagatemark(global_State *g) | |
| 325 { | |
| 326 GCobj *o = gcref(g->gc.gray); | |
| 327 int gct = o->gch.gct; | |
| 328 lj_assertG(isgray(o), "propagation of non-gray object"); | |
| 329 gray2black(o); | |
| 330 setgcrefr(g->gc.gray, o->gch.gclist); /* Remove from gray list. */ | |
| 331 if (LJ_LIKELY(gct == ~LJ_TTAB)) { | |
| 332 GCtab *t = gco2tab(o); /* Returned size estimates traversal cost. */ | |
| 333 if (gc_traverse_tab(g, t) > 0) | |
| 334 black2gray(o); /* Keep weak tables gray. */ | |
| 335 return sizeof(GCtab) + sizeof(TValue) * t->asize + | |
| 336 (t->hmask ? sizeof(Node) * (t->hmask + 1) : 0); | |
| 337 } else if (LJ_LIKELY(gct == ~LJ_TFUNC)) { | |
| 338 GCfunc *fn = gco2func(o); | |
| 339 gc_traverse_func(g, fn); | |
| 340 return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) : | |
| 341 sizeCfunc((MSize)fn->c.nupvalues); | |
| 342 } else if (LJ_LIKELY(gct == ~LJ_TPROTO)) { | |
| 343 GCproto *pt = gco2pt(o); | |
| 344 gc_traverse_proto(g, pt); | |
| 345 return pt->sizept; | |
| 346 } else if (LJ_LIKELY(gct == ~LJ_TTHREAD)) { | |
| 347 lua_State *th = gco2th(o); | |
| 348 setgcrefr(th->gclist, g->gc.grayagain); /* Retraverse in atomic phase. */ | |
| 349 setgcref(g->gc.grayagain, o); | |
| 350 black2gray(o); /* Threads are never black. */ | |
| 351 gc_traverse_thread(g, th); | |
| 352 return sizeof(lua_State) + sizeof(TValue) * th->stacksize; | |
| 353 } else { | |
| 354 #if LJ_HASJIT | |
| 355 GCtrace *T = gco2trace(o); | |
| 356 gc_traverse_trace(g, T); | |
| 357 return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) + | |
| 358 T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry); | |
| 359 #else | |
| 360 lj_assertG(0, "bad GC type %d", gct); | |
| 361 return 0; | |
| 362 #endif | |
| 363 } | |
| 364 } | |
| 365 | |
| 366 /* Propagate all gray objects. */ | |
| 367 static size_t gc_propagate_gray(global_State *g) | |
| 368 { | |
| 369 size_t m = 0; /* Accumulated traversal cost. */ | |
| 370 while (gcref(g->gc.gray) != NULL) | |
| 371 m += propagatemark(g); | |
| 372 return m; | |
| 373 } | |
| 374 | |
| 375 /* -- Sweep phase --------------------------------------------------------- */ | |
| 376 | |
| 377 /* Type of GC free functions. */ | |
| 378 typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o); | |
| 379 | |
| 380 /* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */ | |
| 381 static const GCFreeFunc gc_freefunc[] = { | |
| 382 (GCFreeFunc)lj_str_free, | |
| 383 (GCFreeFunc)lj_func_freeuv, | |
| 384 (GCFreeFunc)lj_state_free, | |
| 385 (GCFreeFunc)lj_func_freeproto, | |
| 386 (GCFreeFunc)lj_func_free, | |
| 387 #if LJ_HASJIT | |
| 388 (GCFreeFunc)lj_trace_free, | |
| 389 #else | |
| 390 (GCFreeFunc)0, /* Trace slot unused without the JIT. */ | |
| 391 #endif | |
| 392 #if LJ_HASFFI | |
| 393 (GCFreeFunc)lj_cdata_free, | |
| 394 #else | |
| 395 (GCFreeFunc)0, /* Cdata slot unused without the FFI. */ | |
| 396 #endif | |
| 397 (GCFreeFunc)lj_tab_free, | |
| 398 (GCFreeFunc)lj_udata_free | |
| 399 }; | |
| 400 | |
| 401 /* Full sweep of a GC list. */ | |
| 402 #define gc_fullsweep(g, p) gc_sweep(g, (p), ~(uint32_t)0) | |
| 403 | |
| 404 /* Partial sweep of a GC list. */ | |
| 405 static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim) | |
| 406 { | |
| 407 /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */ | |
| 408 int ow = otherwhite(g); | |
| 409 GCobj *o; | |
| 410 while ((o = gcref(*p)) != NULL && lim-- > 0) { /* At most lim objects. */ | |
| 411 if (o->gch.gct == ~LJ_TTHREAD) /* Need to sweep open upvalues, too. */ | |
| 412 gc_fullsweep(g, &gco2th(o)->openupval); | |
| 413 if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* Black or current white? */ | |
| 414 lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED), | |
| 415 "sweep of undead object"); | |
| 416 makewhite(g, o); /* Value is alive, change to the current white. */ | |
| 417 p = &o->gch.nextgc; | |
| 418 } else { /* Otherwise value is dead, free it. */ | |
| 419 lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED, | |
| 420 "sweep of unlive object"); | |
| 421 setgcrefr(*p, o->gch.nextgc); | |
| 422 if (o == gcref(g->gc.root)) | |
| 423 setgcrefr(g->gc.root, o->gch.nextgc); /* Adjust list anchor. */ | |
| 424 gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o); | |
| 425 } | |
| 426 } | |
| 427 return p; /* Resume point for the next partial sweep. */ | |
| 428 } | |
| 429 | |
| 430 /* Sweep one string interning table chain. Preserves hashalg bit. */ | |
| 431 static void gc_sweepstr(global_State *g, GCRef *chain) | |
| 432 { | |
| 433 /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */ | |
| 434 int ow = otherwhite(g); | |
| 435 uintptr_t u = gcrefu(*chain); | |
| 436 GCRef q; | |
| 437 GCRef *p = &q; | |
| 438 GCobj *o; | |
| 439 setgcrefp(q, (u & ~(uintptr_t)1)); /* Strip hashalg bit for traversal. */ | |
| 440 while ((o = gcref(*p)) != NULL) { | |
| 441 if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) { /* Black or current white? */ | |
| 442 lj_assertG(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED), | |
| 443 "sweep of undead string"); | |
| 444 makewhite(g, o); /* String is alive, change to the current white. */ | |
| 445 p = &o->gch.nextgc; | |
| 446 } else { /* Otherwise string is dead, free it. */ | |
| 447 lj_assertG(isdead(g, o) || ow == LJ_GC_SFIXED, | |
| 448 "sweep of unlive string"); | |
| 449 setgcrefr(*p, o->gch.nextgc); | |
| 450 lj_str_free(g, gco2str(o)); | |
| 451 } | |
| 452 } | |
| 453 setgcrefp(*chain, (gcrefu(q) | (u & 1))); /* Restore the hashalg bit. */ | |
| 454 } | |
| 455 | |
| 456 /* Check whether we can clear a key or a value slot from a table. */ | |
| 457 static int gc_mayclear(cTValue *o, int val) /* val: nonzero for value slots. */ | |
| 458 { | |
| 459 if (tvisgcv(o)) { /* Only collectable objects can be weak references. */ | |
| 460 if (tvisstr(o)) { /* But strings cannot be used as weak references. */ | |
| 461 gc_mark_str(strV(o)); /* And need to be marked. */ | |
| 462 return 0; | |
| 463 } | |
| 464 if (iswhite(gcV(o))) | |
| 465 return 1; /* Object is about to be collected. */ | |
| 466 if (tvisudata(o) && val && isfinalized(udataV(o))) | |
| 467 return 1; /* Finalized userdata is dropped only from values. */ | |
| 468 } | |
| 469 return 0; /* Cannot clear. */ | |
| 470 } | |
| 471 | |
| 472 /* Clear collected entries from weak tables. */ | |
| 473 static void gc_clearweak(global_State *g, GCobj *o) | |
| 474 { | |
| 475 UNUSED(g); /* Only needed for assertions. */ | |
| 476 while (o) { | |
| 477 GCtab *t = gco2tab(o); | |
| 478 lj_assertG((t->marked & LJ_GC_WEAK), "clear of non-weak table"); | |
| 479 if ((t->marked & LJ_GC_WEAKVAL)) { | |
| 480 MSize i, asize = t->asize; | |
| 481 for (i = 0; i < asize; i++) { | |
| 482 /* Clear array slot when value is about to be collected. */ | |
| 483 TValue *tv = arrayslot(t, i); | |
| 484 if (gc_mayclear(tv, 1)) | |
| 485 setnilV(tv); | |
| 486 } | |
| 487 } | |
| 488 if (t->hmask > 0) { | |
| 489 Node *node = noderef(t->node); | |
| 490 MSize i, hmask = t->hmask; | |
| 491 for (i = 0; i <= hmask; i++) { | |
| 492 Node *n = &node[i]; | |
| 493 /* Clear hash slot when key or value is about to be collected. */ | |
| 494 if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) || | |
| 495 gc_mayclear(&n->val, 1))) | |
| 496 setnilV(&n->val); | |
| 497 } | |
| 498 } | |
| 499 o = gcref(t->gclist); /* Advance along the weak-table list. */ | |
| 500 } | |
| 501 } | |
| 502 | |
| 503 /* Call a userdata or cdata finalizer. */ | |
| 504 static void gc_call_finalizer(global_State *g, lua_State *L, | |
| 505 cTValue *mo, GCobj *o) | |
| 506 { | |
| 507 /* Save and restore lots of state around the __gc callback. */ | |
| 508 uint8_t oldh = hook_save(g); | |
| 509 GCSize oldt = g->gc.threshold; | |
| 510 int errcode; | |
| 511 TValue *top; | |
| 512 lj_trace_abort(g); | |
| 513 hook_entergc(g); /* Disable hooks and new traces during __gc. */ | |
| 514 if (LJ_HASPROFILE && (oldh & HOOK_PROFILE)) lj_dispatch_update(g); | |
| 515 g->gc.threshold = LJ_MAX_MEM; /* Prevent GC steps. */ | |
| 516 top = L->top; | |
| 517 copyTV(L, top++, mo); /* Push finalizer and the object as argument. */ | |
| 518 if (LJ_FR2) setnilV(top++); | |
| 519 setgcV(L, top, o, ~o->gch.gct); | |
| 520 L->top = top+1; | |
| 521 errcode = lj_vm_pcall(L, top, 1+0, -1); /* Stack: |mo|o| -> | */ | |
| 522 hook_restore(g, oldh); | |
| 523 if (LJ_HASPROFILE && (oldh & HOOK_PROFILE)) lj_dispatch_update(g); | |
| 524 g->gc.threshold = oldt; /* Restore GC threshold. */ | |
| 525 if (errcode) { | |
| 526 ptrdiff_t errobj = savestack(L, L->top-1); /* Stack may be resized. */ | |
| 527 lj_vmevent_send(L, ERRFIN, | |
| 528 copyTV(L, L->top++, restorestack(L, errobj)); | |
| 529 ); | |
| 530 L->top--; /* Pop the error object again. */ | |
| 531 } | |
| 532 } | |
| 533 | |
| 534 /* Finalize one userdata or cdata object from the mmudata list. */ | |
| 535 static void gc_finalize(lua_State *L) | |
| 536 { | |
| 537 global_State *g = G(L); | |
| 538 GCobj *o = gcnext(gcref(g->gc.mmudata)); /* Next entry of circular list. */ | |
| 539 cTValue *mo; | |
| 540 lj_assertG(tvref(g->jit_base) == NULL, "finalizer called on trace"); | |
| 541 /* Unchain from list of userdata to be finalized. */ | |
| 542 if (o == gcref(g->gc.mmudata)) /* Last remaining entry? */ | |
| 543 setgcrefnull(g->gc.mmudata); | |
| 544 else | |
| 545 setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc); | |
| 546 #if LJ_HASFFI | |
| 547 if (o->gch.gct == ~LJ_TCDATA) { | |
| 548 TValue tmp, *tv; | |
| 549 /* Add cdata back to the GC list and make it white. */ | |
| 550 setgcrefr(o->gch.nextgc, g->gc.root); | |
| 551 setgcref(g->gc.root, o); | |
| 552 makewhite(g, o); | |
| 553 o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN; | |
| 554 /* Resolve finalizer. */ | |
| 555 setcdataV(L, &tmp, gco2cd(o)); | |
| 556 tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp); | |
| 557 if (!tvisnil(tv)) { | |
| 558 g->gc.nocdatafin = 0; | |
| 559 copyTV(L, &tmp, tv); | |
| 560 setnilV(tv); /* Clear entry in finalizer table. */ | |
| 561 gc_call_finalizer(g, L, &tmp, o); | |
| 562 } | |
| 563 return; | |
| 564 } | |
| 565 #endif | |
| 566 /* Add userdata back to the main userdata list and make it white. */ | |
| 567 setgcrefr(o->gch.nextgc, mainthread(g)->nextgc); | |
| 568 setgcref(mainthread(g)->nextgc, o); | |
| 569 makewhite(g, o); | |
| 570 /* Resolve the __gc metamethod. */ | |
| 571 mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc); | |
| 572 if (mo) | |
| 573 gc_call_finalizer(g, L, mo, o); | |
| 574 } | |
| 575 | |
| 576 /* Finalize all userdata objects from mmudata list. */ | |
| 577 void lj_gc_finalize_udata(lua_State *L) | |
| 578 { | |
| 579 while (gcref(G(L)->gc.mmudata) != NULL) /* List shrinks as we finalize. */ | |
| 580 gc_finalize(L); | |
| 581 } | |
| 582 | |
| 583 #if LJ_HASFFI | |
| 584 /* Finalize all cdata objects from finalizer table. */ | |
| 585 void lj_gc_finalize_cdata(lua_State *L) | |
| 586 { | |
| 587 global_State *g = G(L); | |
| 588 CTState *cts = ctype_ctsG(g); | |
| 589 if (cts) { | |
| 590 GCtab *t = cts->finalizer; | |
| 591 Node *node = noderef(t->node); | |
| 592 ptrdiff_t i; | |
| 593 setgcrefnull(t->metatable); /* Mark finalizer table as disabled. */ | |
| 594 for (i = (ptrdiff_t)t->hmask; i >= 0; i--) /* Scan hash part backwards. */ | |
| 595 if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) { | |
| 596 GCobj *o = gcV(&node[i].key); | |
| 597 TValue tmp; | |
| 598 makewhite(g, o); | |
| 599 o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN; | |
| 600 copyTV(L, &tmp, &node[i].val); | |
| 601 setnilV(&node[i].val); /* Clear entry before calling the finalizer. */ | |
| 602 gc_call_finalizer(g, L, &tmp, o); | |
| 603 } | |
| 604 } | |
| 605 } | |
| 606 #endif | |
| 607 | |
| 608 /* Free all remaining GC objects. */ | |
| 609 void lj_gc_freeall(global_State *g) | |
| 610 { | |
| 611 MSize i, strmask; | |
| 612 /* Free everything, except super-fixed objects (the main thread). */ | |
| 613 g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED; | |
| 614 gc_fullsweep(g, &g->gc.root); | |
| 615 strmask = g->str.mask; /* Snapshot the string table mask. */ | |
| 616 for (i = 0; i <= strmask; i++) /* Free all string hash chains. */ | |
| 617 gc_sweepstr(g, &g->str.tab[i]); | |
| 618 } | |
| 619 | |
| 620 /* -- Collector ----------------------------------------------------------- */ | |
| 621 | |
| 622 /* Atomic part of the GC cycle, transitioning from mark to sweep phase. */ | |
| 623 static void atomic(global_State *g, lua_State *L) | |
| 624 { | |
| 625 size_t udsize; | |
| 626 | |
| 627 gc_mark_uv(g); /* Need to remark open upvalues (the thread may be dead). */ | |
| 628 gc_propagate_gray(g); /* Propagate any left-overs. */ | |
| 629 | |
| 630 setgcrefr(g->gc.gray, g->gc.weak); /* Empty the list of weak tables. */ | |
| 631 setgcrefnull(g->gc.weak); | |
| 632 lj_assertG(!iswhite(obj2gco(mainthread(g))), "main thread turned white"); | |
| 633 gc_markobj(g, L); /* Mark running thread. */ | |
| 634 gc_traverse_curtrace(g); /* Traverse current trace. */ | |
| 635 gc_mark_gcroot(g); /* Mark GC roots (again). */ | |
| 636 gc_propagate_gray(g); /* Propagate all of the above. */ | |
| 637 | |
| 638 setgcrefr(g->gc.gray, g->gc.grayagain); /* Empty the 2nd chance list. */ | |
| 639 setgcrefnull(g->gc.grayagain); | |
| 640 gc_propagate_gray(g); /* Propagate it. */ | |
| 641 | |
| 642 udsize = lj_gc_separateudata(g, 0); /* Separate userdata to be finalized. */ | |
| 643 gc_mark_mmudata(g); /* Mark them. */ | |
| 644 udsize += gc_propagate_gray(g); /* And propagate the marks. */ | |
| 645 | |
| 646 /* All marking done, clear weak tables. */ | |
| 647 gc_clearweak(g, gcref(g->gc.weak)); | |
| 648 | |
| 649 lj_buf_shrink(L, &g->tmpbuf); /* Shrink temp buffer. */ | |
| 650 | |
| 651 /* Prepare for sweep phase. */ | |
| 652 g->gc.currentwhite = (uint8_t)otherwhite(g); /* Flip current white. */ | |
| 653 g->strempty.marked = g->gc.currentwhite; /* Empty string gets new white. */ | |
| 654 setmref(g->gc.sweep, &g->gc.root); | |
| 655 g->gc.estimate = g->gc.total - (GCSize)udsize; /* Initial estimate. */ | |
| 656 } | |
| 657 | |
| 658 /* GC state machine. Returns a cost estimate for each step performed. */ | |
| 659 static size_t gc_onestep(lua_State *L) | |
| 660 { | |
| 661 global_State *g = G(L); | |
| 662 switch (g->gc.state) { | |
| 663 case GCSpause: | |
| 664 gc_mark_start(g); /* Start a new GC cycle by marking all GC roots. */ | |
| 665 return 0; | |
| 666 case GCSpropagate: | |
| 667 if (gcref(g->gc.gray) != NULL) | |
| 668 return propagatemark(g); /* Propagate one gray object. */ | |
| 669 g->gc.state = GCSatomic; /* End of mark phase. */ | |
| 670 return 0; | |
| 671 case GCSatomic: | |
| 672 if (tvref(g->jit_base)) /* Don't run atomic phase on trace. */ | |
| 673 return LJ_MAX_MEM; /* Huge cost stops the caller's step loop. */ | |
| 674 atomic(g, L); | |
| 675 g->gc.state = GCSsweepstring; /* Start of sweep phase. */ | |
| 676 g->gc.sweepstr = 0; /* Index of the next string chain to sweep. */ | |
| 677 return 0; | |
| 678 case GCSsweepstring: { | |
| 679 GCSize old = g->gc.total; | |
| 680 gc_sweepstr(g, &g->str.tab[g->gc.sweepstr++]); /* Sweep one chain. */ | |
| 681 if (g->gc.sweepstr > g->str.mask) | |
| 682 g->gc.state = GCSsweep; /* All string hash chains sweeped. */ | |
| 683 lj_assertG(old >= g->gc.total, "sweep increased memory"); | |
| 684 g->gc.estimate -= old - g->gc.total; | |
| 685 return GCSWEEPCOST; | |
| 686 } | |
| 687 case GCSsweep: { | |
| 688 GCSize old = g->gc.total; | |
| 689 setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX)); | |
| 690 lj_assertG(old >= g->gc.total, "sweep increased memory"); | |
| 691 g->gc.estimate -= old - g->gc.total; | |
| 692 if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) { /* Sweep done? */ | |
| 693 if (g->str.num <= (g->str.mask >> 2) && g->str.mask > LJ_MIN_STRTAB*2-1) | |
| 694 lj_str_resize(L, g->str.mask >> 1); /* Shrink string table. */ | |
| 695 if (gcref(g->gc.mmudata)) { /* Need any finalizations? */ | |
| 696 g->gc.state = GCSfinalize; | |
| 697 #if LJ_HASFFI | |
| 698 g->gc.nocdatafin = 1; | |
| 699 #endif | |
| 700 } else { /* Otherwise skip this phase to help the JIT. */ | |
| 701 g->gc.state = GCSpause; /* End of GC cycle. */ | |
| 702 g->gc.debt = 0; | |
| 703 } | |
| 704 } | |
| 705 return GCSWEEPMAX*GCSWEEPCOST; | |
| 706 } | |
| 707 case GCSfinalize: | |
| 708 if (gcref(g->gc.mmudata) != NULL) { | |
| 709 GCSize old = g->gc.total; | |
| 710 if (tvref(g->jit_base)) /* Don't call finalizers on trace. */ | |
| 711 return LJ_MAX_MEM; | |
| 712 gc_finalize(L); /* Finalize one userdata object. */ | |
| 713 if (old >= g->gc.total && g->gc.estimate > old - g->gc.total) | |
| 714 g->gc.estimate -= old - g->gc.total; | |
| 715 if (g->gc.estimate > GCFINALIZECOST) | |
| 716 g->gc.estimate -= GCFINALIZECOST; | |
| 717 return GCFINALIZECOST; | |
| 718 } | |
| 719 #if LJ_HASFFI | |
| 720 if (!g->gc.nocdatafin) lj_tab_rehash(L, ctype_ctsG(g)->finalizer); | |
| 721 #endif | |
| 722 g->gc.state = GCSpause; /* End of GC cycle. */ | |
| 723 g->gc.debt = 0; | |
| 724 return 0; | |
| 725 default: | |
| 726 lj_assertG(0, "bad GC state"); | |
| 727 return 0; | |
| 728 } | |
| 729 } | |
| 730 | |
| 731 /* Perform a limited amount of incremental GC steps. */ | |
| 732 int LJ_FASTCALL lj_gc_step(lua_State *L) | |
| 733 { | |
| 734 global_State *g = G(L); | |
| 735 GCSize lim; | |
| 736 int32_t ostate = g->vmstate; | |
| 737 setvmstate(g, GC); | |
| 738 lim = (GCSTEPSIZE/100) * g->gc.stepmul; /* stepmul is a percentage. */ | |
| 739 if (lim == 0) | |
| 740 lim = LJ_MAX_MEM; | |
| 741 if (g->gc.total > g->gc.threshold) | |
| 742 g->gc.debt += g->gc.total - g->gc.threshold; | |
| 743 do { | |
| 744 lim -= (GCSize)gc_onestep(L); /* Deduct the cost of each step. */ | |
| 745 if (g->gc.state == GCSpause) { | |
| 746 g->gc.threshold = (g->gc.estimate/100) * g->gc.pause; | |
| 747 g->vmstate = ostate; | |
| 748 return 1; /* Finished a GC cycle. */ | |
| 749 } | |
| 750 } while (sizeof(lim) == 8 ? ((int64_t)lim > 0) : ((int32_t)lim > 0)); | |
| 751 if (g->gc.debt < GCSTEPSIZE) { | |
| 752 g->gc.threshold = g->gc.total + GCSTEPSIZE; | |
| 753 g->vmstate = ostate; | |
| 754 return -1; | |
| 755 } else { | |
| 756 g->gc.debt -= GCSTEPSIZE; | |
| 757 g->gc.threshold = g->gc.total; | |
| 758 g->vmstate = ostate; | |
| 759 return 0; | |
| 760 } | |
| 761 } | |
| 762 | |
| 763 /* Ditto, but fix the stack top first. */ | |
| 764 void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L) | |
| 765 { | |
| 766 if (curr_funcisL(L)) L->top = curr_topL(L); /* Fix top for traversal. */ | |
| 767 lj_gc_step(L); | |
| 768 } | |
| 769 | |
| 770 #if LJ_HASJIT | |
| 771 /* Perform multiple GC steps. Called from JIT-compiled code. */ | |
| 772 int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps) | |
| 773 { | |
| 774 lua_State *L = gco2th(gcref(g->cur_L)); | |
| 775 L->base = tvref(G(L)->jit_base); /* Sync stack base/top before stepping. */ | |
| 776 L->top = curr_topL(L); | |
| 777 while (steps-- > 0 && lj_gc_step(L) == 0) | |
| 778 ; | |
| 779 /* Return 1 to force a trace exit. */ | |
| 780 return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize); | |
| 781 } | |
| 782 #endif | |
| 783 | |
| 784 /* Perform a full GC cycle. */ | |
| 785 void lj_gc_fullgc(lua_State *L) | |
| 786 { | |
| 787 global_State *g = G(L); | |
| 788 int32_t ostate = g->vmstate; | |
| 789 setvmstate(g, GC); | |
| 790 if (g->gc.state <= GCSatomic) { /* Caught somewhere in the middle. */ | |
| 791 setmref(g->gc.sweep, &g->gc.root); /* Sweep everything (preserving it). */ | |
| 792 setgcrefnull(g->gc.gray); /* Reset lists from partial propagation. */ | |
| 793 setgcrefnull(g->gc.grayagain); | |
| 794 setgcrefnull(g->gc.weak); | |
| 795 g->gc.state = GCSsweepstring; /* Fast forward to the sweep phase. */ | |
| 796 g->gc.sweepstr = 0; | |
| 797 } | |
| 798 while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep) | |
| 799 gc_onestep(L); /* Finish sweep. */ | |
| 800 lj_assertG(g->gc.state == GCSfinalize || g->gc.state == GCSpause, | |
| 801 "bad GC state"); | |
| 802 /* Now perform a full GC. */ | |
| 803 g->gc.state = GCSpause; /* Restart at the beginning of a cycle. */ | |
| 804 do { gc_onestep(L); } while (g->gc.state != GCSpause); | |
| 805 g->gc.threshold = (g->gc.estimate/100) * g->gc.pause; | |
| 806 g->vmstate = ostate; | |
| 807 } | |
| 808 | |
| 809 /* -- Write barriers ------------------------------------------------------ */ | |
| 810 | |
| 811 /* Move the GC propagation frontier forward. o (black) now refs v (white). */ | |
| 812 void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v) | |
| 813 { | |
| 814 lj_assertG(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o), | |
| 815 "bad object states for forward barrier"); | |
| 816 lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause, | |
| 817 "bad GC state"); | |
| 818 lj_assertG(o->gch.gct != ~LJ_TTAB, "barrier object is not a table"); | |
| 819 /* Preserve invariant during propagation. Otherwise it doesn't matter. */ | |
| 820 if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) | |
| 821 gc_mark(g, v); /* Move frontier forward. */ | |
| 822 else | |
| 823 makewhite(g, o); /* Make it white to avoid the following barrier. */ | |
| 824 } | |
| 825 | |
/* Specialized barrier for closed upvalue. Pass &uv->tv.
** TV2MARKED recovers the 'marked' byte of the enclosing GCupval from the
** pointer to its embedded tv slot via offsetof arithmetic, avoiding a
** separate GCupval* parameter.
*/
void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv)
{
#define TV2MARKED(x) \
  (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_mark(g, gcV(tv));  /* Keep the marking invariant: mark the value. */
  else
    /* Otherwise recolor the upvalue itself to the current white. */
    TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g);
#undef TV2MARKED
}
| 837 | |
/* Close upvalue. Also needs a write barrier.
** The open upvalue's stack slot is copied into the upvalue object itself,
** the upvalue is linked into the main GC root list, and its GC color is
** fixed up according to the current collector phase.
*/
void lj_gc_closeuv(global_State *g, GCupval *uv)
{
  GCobj *o = obj2gco(uv);
  /* Copy stack slot to upvalue itself and point to the copy. */
  copyTV(mainthread(g), &uv->tv, uvval(uv));
  setmref(uv->v, &uv->tv);
  uv->closed = 1;
  /* Prepend the upvalue to the GC root list. */
  setgcrefr(o->gch.nextgc, g->gc.root);
  setgcref(g->gc.root, o);
  if (isgray(o)) {  /* A closed upvalue is never gray, so fix this. */
    if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
      gray2black(o);  /* Make it black and preserve invariant. */
      if (tviswhite(&uv->tv))
	lj_gc_barrierf(g, o, gcV(&uv->tv));  /* Black->white needs barrier. */
    } else {
      makewhite(g, o);  /* Make it white, i.e. sweep the upvalue. */
      lj_assertG(g->gc.state != GCSfinalize && g->gc.state != GCSpause,
		 "bad GC state");
    }
  }
}
| 860 | |
#if LJ_HASJIT
/* Mark a trace if it's saved during the propagation phase.
** No action is required outside the marking phases.
*/
void lj_gc_barriertrace(global_State *g, uint32_t traceno)
{
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_marktrace(g, traceno);
}
#endif
| 869 | |
| 870 /* -- Allocator ----------------------------------------------------------- */ | |
| 871 | |
| 872 /* Call pluggable memory allocator to allocate or resize a fragment. */ | |
| 873 void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz) | |
| 874 { | |
| 875 global_State *g = G(L); | |
| 876 lj_assertG((osz == 0) == (p == NULL), "realloc API violation"); | |
| 877 p = g->allocf(g->allocd, p, osz, nsz); | |
| 878 if (p == NULL && nsz > 0) | |
| 879 lj_err_mem(L); | |
| 880 lj_assertG((nsz == 0) == (p == NULL), "allocf API violation"); | |
| 881 lj_assertG(checkptrGC(p), | |
| 882 "allocated memory address %p outside required range", p); | |
| 883 g->gc.total = (g->gc.total - osz) + nsz; | |
| 884 return p; | |
| 885 } | |
| 886 | |
/* Allocate new GC object and link it to the root set.
** Throws a memory error on allocation failure. The new object is prepended
** to the GC root list and colored with the current white.
*/
void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size)
{
  global_State *g = G(L);
  GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
  if (o == NULL)
    lj_err_mem(L);  /* Out of memory. */
  lj_assertG(checkptrGC(o),
	     "allocated memory address %p outside required range", o);
  g->gc.total += size;  /* Account for the new allocation. */
  /* Prepend to the GC root list. */
  setgcrefr(o->gch.nextgc, g->gc.root);
  setgcref(g->gc.root, o);
  newwhite(g, o);  /* New objects start out with the current white color. */
  return o;
}
| 902 | |
| 903 /* Resize growable vector. */ | |
| 904 void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz) | |
| 905 { | |
| 906 MSize sz = (*szp) << 1; | |
| 907 if (sz < LJ_MIN_VECSZ) | |
| 908 sz = LJ_MIN_VECSZ; | |
| 909 if (sz > lim) | |
| 910 sz = lim; | |
| 911 p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz); | |
| 912 *szp = sz; | |
| 913 return p; | |
| 914 } | |
| 915 |