Mercurial
comparison third_party/luajit/src/lj_trace.c @ 178:94705b5986b3
[ThirdParty] Added WRK and luajit for load testing.
| author | MrJuneJune <me@mrjunejune.com> |
|---|---|
| date | Thu, 22 Jan 2026 20:10:30 -0800 |
| parents | |
| children |
comparison
equal
deleted
inserted
replaced
| 177:24fe8ff94056 | 178:94705b5986b3 |
|---|---|
| 1 /* | |
| 2 ** Trace management. | |
| 3 ** Copyright (C) 2005-2023 Mike Pall. See Copyright Notice in luajit.h | |
| 4 */ | |
| 5 | |
| 6 #define lj_trace_c | |
| 7 #define LUA_CORE | |
| 8 | |
| 9 #include "lj_obj.h" | |
| 10 | |
| 11 #if LJ_HASJIT | |
| 12 | |
| 13 #include "lj_gc.h" | |
| 14 #include "lj_err.h" | |
| 15 #include "lj_debug.h" | |
| 16 #include "lj_str.h" | |
| 17 #include "lj_frame.h" | |
| 18 #include "lj_state.h" | |
| 19 #include "lj_bc.h" | |
| 20 #include "lj_ir.h" | |
| 21 #include "lj_jit.h" | |
| 22 #include "lj_iropt.h" | |
| 23 #include "lj_mcode.h" | |
| 24 #include "lj_trace.h" | |
| 25 #include "lj_snap.h" | |
| 26 #include "lj_gdbjit.h" | |
| 27 #include "lj_record.h" | |
| 28 #include "lj_asm.h" | |
| 29 #include "lj_dispatch.h" | |
| 30 #include "lj_vm.h" | |
| 31 #include "lj_vmevent.h" | |
| 32 #include "lj_target.h" | |
| 33 #include "lj_prng.h" | |
| 34 | |
| 35 /* -- Error handling ------------------------------------------------------ */ | |
| 36 | |
/* Synchronous abort with error message. */
void lj_trace_err(jit_State *J, TraceError e)
{
  setnilV(&J->errinfo);  /* No error info. */
  setintV(J->L->top++, (int32_t)e);  /* Push the error code on the Lua stack. */
  lj_err_throw(J->L, LUA_ERRRUN);  /* Throws; unwinds to the protected caller. */
}
| 44 | |
/* Synchronous abort with error message and error info. */
void lj_trace_err_info(jit_State *J, TraceError e)
{
  /* Note: J->errinfo is left untouched; the caller has already filled it in. */
  setintV(J->L->top++, (int32_t)e);  /* Push the error code on the Lua stack. */
  lj_err_throw(J->L, LUA_ERRRUN);  /* Throws; unwinds to the protected caller. */
}
| 51 | |
| 52 /* -- Trace management ---------------------------------------------------- */ | |
| 53 | |
| 54 /* The current trace is first assembled in J->cur. The variable length | |
| 55 ** arrays point to shared, growable buffers (J->irbuf etc.). When trace | |
| 56 ** recording ends successfully, the current trace and its data structures | |
| 57 ** are copied to a new (compact) GCtrace object. | |
| 58 */ | |
| 59 | |
/* Find a free trace number. Returns 0 if the trace table is exhausted. */
static TraceNo trace_findfree(jit_State *J)
{
  MSize osz, lim;
  if (J->freetrace == 0)
    J->freetrace = 1;  /* Trace number 0 is reserved (means "no trace"). */
  /* Scan upward from the free-slot hint for an unused entry. */
  for (; J->freetrace < J->sizetrace; J->freetrace++)
    if (traceref(J, J->freetrace) == NULL)
      return J->freetrace++;  /* Found one; advance the hint past it. */
  /* Need to grow trace array. */
  lim = (MSize)J->param[JIT_P_maxtrace] + 1;
  if (lim < 2) lim = 2; else if (lim > 65535) lim = 65535;  /* 16 bit limit. */
  osz = J->sizetrace;
  if (osz >= lim)
    return 0;  /* Too many traces. */
  lj_mem_growvec(J->L, J->trace, J->sizetrace, lim, GCRef);
  for (; osz < J->sizetrace; osz++)
    setgcrefnull(J->trace[osz]);  /* Clear the newly added slots. */
  return J->freetrace;
}
| 80 | |
/* Append a compact copy of one variable-length vector from J->cur to the
** trace T. Expects locals T (GCtrace *), J (jit_State *) and a byte cursor
** p in scope; advances p past the copied data.
*/
#define TRACE_APPENDVEC(field, szfield, tp) \
  T->field = (tp *)p; \
  memcpy(p, J->cur.field, J->cur.szfield*sizeof(tp)); \
  p += J->cur.szfield*sizeof(tp);
| 85 | |
#ifdef LUAJIT_USE_PERFTOOLS
/*
** Create symbol table of JIT-compiled code. For use with Linux perf tools.
** Example usage:
**   perf record -f -e cycles luajit test.lua
**   perf report -s symbol
**   rm perf.data /tmp/perf-*.map
*/
#include <stdio.h>
#include <unistd.h>

/* Append one symbol line for trace T to the per-process perf map file. */
static void perftools_addtrace(GCtrace *T)
{
  static FILE *fp;  /* Map file handle, opened lazily on the first trace. */
  GCproto *pt = &gcref(T->startpt)->pt;
  const BCIns *startpc = mref(T->startpc, const BCIns);
  const char *name = proto_chunknamestr(pt);
  BCLine lineno;
  if (name[0] == '@' || name[0] == '=')
    name++;  /* Strip the chunk-name prefix character. */
  else
    name = "(string)";  /* Chunk was loaded from a string, not a file. */
  lj_assertX(startpc >= proto_bc(pt) && startpc < proto_bc(pt) + pt->sizebc,
	     "trace PC out of range");
  lineno = lj_debug_line(pt, proto_bcpos(pt, startpc));
  if (!fp) {
    char fname[40];
    sprintf(fname, "/tmp/perf-%d.map", getpid());
    if (!(fp = fopen(fname, "w"))) return;  /* Silently give up on failure. */
    setlinebuf(fp);  /* Line-buffer so perf sees entries promptly. */
  }
  /* Line format consumed by perf: "<start-addr> <size> <symbol-name>". */
  fprintf(fp, "%lx %x TRACE_%d::%s:%u\n",
	  (long)T->mcode, T->szmcode, T->traceno, name, lineno);
}
#endif
| 121 | |
/* Allocate space for a compact copy of T (header + IR + snapshot vectors
** in one contiguous allocation). Only the IR is copied here; the snapshot
** vectors are appended later (see trace_save()).
*/
GCtrace * LJ_FASTCALL lj_trace_alloc(lua_State *L, GCtrace *T)
{
  size_t sztr = ((sizeof(GCtrace)+7)&~7);  /* Header rounded up to 8 bytes. */
  size_t szins = (T->nins-T->nk)*sizeof(IRIns);  /* Used IR range only. */
  size_t sz = sztr + szins +
	      T->nsnap*sizeof(SnapShot) +
	      T->nsnapmap*sizeof(SnapEntry);
  GCtrace *T2 = lj_mem_newt(L, (MSize)sz, GCtrace);
  char *p = (char *)T2 + sztr;  /* Variable-length data follows the header. */
  T2->gct = ~LJ_TTRACE;
  T2->marked = 0;
  T2->traceno = 0;  /* Not yet registered in J->trace. */
  T2->ir = (IRIns *)p - T->nk;  /* Bias so T2->ir[ref] works for ref >= nk. */
  T2->nins = T->nins;
  T2->nk = T->nk;
  T2->nsnap = T->nsnap;
  T2->nsnapmap = T->nsnapmap;
  memcpy(p, T->ir + T->nk, szins);  /* Copy only the IR for now. */
  return T2;
}
| 143 | |
/* Save current trace by copying and compacting it into the pre-allocated
** GCtrace T (see lj_trace_alloc()). Also links T into the GC root list
** and publishes it under its trace number.
*/
static void trace_save(jit_State *J, GCtrace *T)
{
  size_t sztr = ((sizeof(GCtrace)+7)&~7);
  size_t szins = (J->cur.nins-J->cur.nk)*sizeof(IRIns);
  char *p = (char *)T + sztr;
  memcpy(T, &J->cur, sizeof(GCtrace));  /* Copy the fixed-size header. */
  /* Link T into the GC root list and color it white. */
  setgcrefr(T->nextgc, J2G(J)->gc.root);
  setgcrefp(J2G(J)->gc.root, T);
  newwhite(J2G(J), T);
  T->gct = ~LJ_TTRACE;
  T->ir = (IRIns *)p - J->cur.nk;  /* The IR has already been copied above. */
#if LJ_ABI_PAUTH
  T->mcauth = lj_ptr_sign((ASMFunction)T->mcode, T);  /* Sign mcode entry. */
#endif
  p += szins;
  /* Append compact copies of the snapshot vectors after the IR. */
  TRACE_APPENDVEC(snap, nsnap, SnapShot)
  TRACE_APPENDVEC(snapmap, nsnapmap, SnapEntry)
  J->cur.traceno = 0;  /* J->cur no longer owns this trace number. */
  J->curfinal = NULL;  /* Ownership of T transferred to the trace table. */
  setgcrefp(J->trace[T->traceno], T);  /* Publish under its trace number. */
  lj_gc_barriertrace(J2G(J), T->traceno);
  lj_gdbjit_addtrace(J, T);
#ifdef LUAJIT_USE_PERFTOOLS
  perftools_addtrace(T);
#endif
}
| 171 | |
/* Free a trace object and release its trace number (if registered). */
void LJ_FASTCALL lj_trace_free(global_State *g, GCtrace *T)
{
  jit_State *J = G2J(g);
  if (T->traceno) {  /* Traceno 0 means not (or no longer) registered. */
    lj_gdbjit_deltrace(J, T);
    if (T->traceno < J->freetrace)
      J->freetrace = T->traceno;  /* Lower the free-slot search hint. */
    setgcrefnull(J->trace[T->traceno]);
  }
  /* Size must mirror the layout computed in lj_trace_alloc(). */
  lj_mem_free(g, T,
	      ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
	      T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry));
}
| 185 | |
| 186 /* Re-enable compiling a prototype by unpatching any modified bytecode. */ | |
| 187 void lj_trace_reenableproto(GCproto *pt) | |
| 188 { | |
| 189 if ((pt->flags & PROTO_ILOOP)) { | |
| 190 BCIns *bc = proto_bc(pt); | |
| 191 BCPos i, sizebc = pt->sizebc; | |
| 192 pt->flags &= ~PROTO_ILOOP; | |
| 193 if (bc_op(bc[0]) == BC_IFUNCF) | |
| 194 setbc_op(&bc[0], BC_FUNCF); | |
| 195 for (i = 1; i < sizebc; i++) { | |
| 196 BCOp op = bc_op(bc[i]); | |
| 197 if (op == BC_IFORL || op == BC_IITERL || op == BC_ILOOP) | |
| 198 setbc_op(&bc[i], (int)op+(int)BC_LOOP-(int)BC_ILOOP); | |
| 199 } | |
| 200 } | |
| 201 } | |
| 202 | |
/* Unpatch the bytecode modified by a root trace. */
static void trace_unpatch(jit_State *J, GCtrace *T)
{
  BCOp op = bc_op(T->startins);  /* Original (pre-patch) opcode. */
  BCIns *pc = mref(T->startpc, BCIns);
  UNUSED(J);  /* Only used by the assertions below. */
  if (op == BC_JMP)
    return;  /* No need to unpatch branches in parent traces (yet). */
  switch (bc_op(*pc)) {  /* Dispatch on the currently patched opcode. */
  case BC_JFORL:
    lj_assertJ(traceref(J, bc_d(*pc)) == T, "JFORL references other trace");
    *pc = T->startins;  /* Restore the original FORL. */
    pc += bc_j(T->startins);
    lj_assertJ(bc_op(*pc) == BC_JFORI, "FORL does not point to JFORI");
    setbc_op(pc, BC_FORI);  /* Also restore the loop-entry FORI. */
    break;
  case BC_JITERL:
  case BC_JLOOP:
    lj_assertJ(op == BC_ITERL || op == BC_ITERN || op == BC_LOOP ||
	       bc_isret(op), "bad original bytecode %d", op);
    *pc = T->startins;
    break;
  case BC_JMP:
    lj_assertJ(op == BC_ITERL, "bad original bytecode %d", op);
    pc += bc_j(*pc)+2;  /* Follow the branch to the patched instruction. */
    if (bc_op(*pc) == BC_JITERL) {
      lj_assertJ(traceref(J, bc_d(*pc)) == T, "JITERL references other trace");
      *pc = T->startins;
    }
    break;
  case BC_JFUNCF:
    lj_assertJ(op == BC_FUNCF, "bad original bytecode %d", op);
    *pc = T->startins;
    break;
  default:  /* Already unpatched. */
    break;
  }
}
| 241 | |
/* Flush a root trace. */
static void trace_flushroot(jit_State *J, GCtrace *T)
{
  GCproto *pt = &gcref(T->startpt)->pt;
  lj_assertJ(T->root == 0, "not a root trace");
  lj_assertJ(pt != NULL, "trace has no prototype");
  /* First unpatch any modified bytecode. */
  trace_unpatch(J, T);
  /* Unlink root trace from chain anchored in prototype. */
  if (pt->trace == T->traceno) {  /* Trace is first in chain. Easy. */
    pt->trace = T->nextroot;
  } else if (pt->trace) {  /* Otherwise search in chain of root traces. */
    GCtrace *T2 = traceref(J, pt->trace);
    if (T2) {  /* Guard: the chain head may no longer exist. */
      for (; T2->nextroot; T2 = traceref(J, T2->nextroot))
	if (T2->nextroot == T->traceno) {
	  T2->nextroot = T->nextroot;  /* Unlink from chain. */
	  break;
	}
    }
  }
}
| 264 | |
| 265 /* Flush a trace. Only root traces are considered. */ | |
| 266 void lj_trace_flush(jit_State *J, TraceNo traceno) | |
| 267 { | |
| 268 if (traceno > 0 && traceno < J->sizetrace) { | |
| 269 GCtrace *T = traceref(J, traceno); | |
| 270 if (T && T->root == 0) | |
| 271 trace_flushroot(J, T); | |
| 272 } | |
| 273 } | |
| 274 | |
| 275 /* Flush all traces associated with a prototype. */ | |
| 276 void lj_trace_flushproto(global_State *g, GCproto *pt) | |
| 277 { | |
| 278 while (pt->trace != 0) | |
| 279 trace_flushroot(G2J(g), traceref(G2J(g), pt->trace)); | |
| 280 } | |
| 281 | |
/* Flush all traces. Returns 1 (and does nothing) when called from a GC hook. */
int lj_trace_flushall(lua_State *L)
{
  jit_State *J = L2J(L);
  ptrdiff_t i;
  if ((J2G(J)->hookmask & HOOK_GC))
    return 1;  /* Refuse to flush from within a __gc hook. */
  /* Drop every registered trace (slot 0 is never used). */
  for (i = (ptrdiff_t)J->sizetrace-1; i > 0; i--) {
    GCtrace *T = traceref(J, i);
    if (T) {
      if (T->root == 0)
	trace_flushroot(J, T);  /* Unpatch bytecode of root traces. */
      lj_gdbjit_deltrace(J, T);
      T->traceno = T->link = 0;  /* Blacklist the link for cont_stitch. */
      setgcrefnull(J->trace[i]);  /* Trace objects themselves stay with the GC. */
    }
  }
  J->cur.traceno = 0;
  J->freetrace = 0;  /* Restart the free-slot search from the beginning. */
  /* Clear penalty cache. */
  memset(J->penalty, 0, sizeof(J->penalty));
  /* Free the whole machine code and invalidate all exit stub groups. */
  lj_mcode_free(J);
  memset(J->exitstubgroup, 0, sizeof(J->exitstubgroup));
  lj_vmevent_send(L, TRACE,
    setstrV(L, L->top++, lj_str_newlit(L, "flush"));
  );
  return 0;
}
| 311 | |
/* Initialize JIT compiler state: SIMD masks and per-target FP constants. */
void lj_trace_initstate(global_State *g)
{
  jit_State *J = G2J(g);
  TValue *tv;

  /* Initialize aligned SIMD constants. */
  tv = LJ_KSIMD(J, LJ_KSIMD_ABS);
  tv[0].u64 = U64x(7fffffff,ffffffff);  /* Mask that clears the sign bit. */
  tv[1].u64 = U64x(7fffffff,ffffffff);
  tv = LJ_KSIMD(J, LJ_KSIMD_NEG);
  tv[0].u64 = U64x(80000000,00000000);  /* Mask that flips the sign bit. */
  tv[1].u64 = U64x(80000000,00000000);

  /* Initialize 32/64 bit constants. */
#if LJ_TARGET_X86ORX64
  J->k64[LJ_K64_TOBIT].u64 = U64x(43380000,00000000);  /* 2^52 + 2^51. */
#if LJ_32
  J->k64[LJ_K64_M2P64_31].u64 = U64x(c1e00000,00000000);  /* -2^31 (double). */
#endif
  J->k64[LJ_K64_2P64].u64 = U64x(43f00000,00000000);  /* 2^64 (double). */
  J->k32[LJ_K32_M2P64_31] = LJ_64 ? 0xdf800000 : 0xcf000000;
#endif
#if LJ_TARGET_X86ORX64 || LJ_TARGET_MIPS64
  J->k64[LJ_K64_M2P64].u64 = U64x(c3f00000,00000000);  /* -2^64 (double). */
#endif
#if LJ_TARGET_PPC
  J->k32[LJ_K32_2P52_2P31] = 0x59800004;  /* 2^52 + 2^31 (float). */
  J->k32[LJ_K32_2P52] = 0x59800000;  /* 2^52 (float). */
#endif
#if LJ_TARGET_PPC || LJ_TARGET_MIPS
  J->k32[LJ_K32_2P31] = 0x4f000000;  /* 2^31 (float). */
#endif
#if LJ_TARGET_MIPS
  J->k64[LJ_K64_2P31].u64 = U64x(41e00000,00000000);  /* 2^31 (double). */
#if LJ_64
  J->k64[LJ_K64_2P63].u64 = U64x(43e00000,00000000);  /* 2^63 (double). */
  J->k32[LJ_K32_2P63] = 0x5f000000;  /* 2^63 (float). */
  J->k32[LJ_K32_M2P64] = 0xdf800000;  /* -2^64 (float). */
#endif
#endif
}
| 354 | |
/* Free everything associated with the JIT compiler state. */
void lj_trace_freestate(global_State *g)
{
  jit_State *J = G2J(g);
#ifdef LUA_USE_ASSERT
  {  /* This assumes all traces have already been freed. */
    ptrdiff_t i;
    for (i = 1; i < (ptrdiff_t)J->sizetrace; i++)
      lj_assertG(i == (ptrdiff_t)J->cur.traceno || traceref(J, i) == NULL,
		 "trace still allocated");
  }
#endif
  lj_mcode_free(J);
  /* Free the shared growable buffers and the trace table itself. */
  lj_mem_freevec(g, J->snapmapbuf, J->sizesnapmap, SnapEntry);
  lj_mem_freevec(g, J->snapbuf, J->sizesnap, SnapShot);
  /* The IR buffer pointer is biased by irbotlim; undo that for the free. */
  lj_mem_freevec(g, J->irbuf + J->irbotlim, J->irtoplim - J->irbotlim, IRIns);
  lj_mem_freevec(g, J->trace, J->sizetrace, GCRef);
}
| 373 | |
| 374 /* -- Penalties and blacklisting ------------------------------------------ */ | |
| 375 | |
/* Blacklist a bytecode instruction so it no longer triggers hot counts. */
static void blacklist_pc(GCproto *pt, BCIns *pc)
{
  if (bc_op(*pc) == BC_ITERN) {
    /* ITERN is special: demote to ITERC and patch the instruction the
    ** following JMP points to back to a plain JMP.
    */
    setbc_op(pc, BC_ITERC);
    setbc_op(pc+1+bc_j(pc[1]), BC_JMP);
  } else {
    /* Switch to the non-hot-counting ILOOP-variant of the opcode. */
    setbc_op(pc, (int)bc_op(*pc)+(int)BC_ILOOP-(int)BC_LOOP);
    pt->flags |= PROTO_ILOOP;  /* Remembered by lj_trace_reenableproto(). */
  }
}
| 387 | |
/* Penalize a bytecode instruction by backing off its hot count.
** Repeated penalties grow exponentially (with a random component) until
** the instruction is blacklisted outright.
*/
static void penalty_pc(jit_State *J, GCproto *pt, BCIns *pc, TraceError e)
{
  uint32_t i, val = PENALTY_MIN;
  for (i = 0; i < PENALTY_SLOTS; i++)
    if (mref(J->penalty[i].pc, const BCIns) == pc) {  /* Cache slot found? */
      /* First try to bump its hotcount several times. */
      val = ((uint32_t)J->penalty[i].val << 1) +
	    (lj_prng_u64(&J2G(J)->prng) & ((1u<<PENALTY_RNDBITS)-1));
      if (val > PENALTY_MAX) {
	blacklist_pc(pt, pc);  /* Blacklist it, if that didn't help. */
	return;
      }
      goto setpenalty;
    }
  /* Assign a new penalty cache slot (round-robin replacement). */
  i = J->penaltyslot;
  J->penaltyslot = (J->penaltyslot + 1) & (PENALTY_SLOTS-1);
  setmref(J->penalty[i].pc, pc);
setpenalty:
  J->penalty[i].val = (uint16_t)val;
  J->penalty[i].reason = e;  /* Record the most recent abort reason. */
  hotcount_set(J2GG(J), pc+1, val);  /* Delay the next hot-count trigger. */
}
| 412 | |
| 413 /* -- Trace compiler state machine ---------------------------------------- */ | |
| 414 | |
/* Start tracing: allocate a trace number, reset J->cur and notify listeners.
** Bails out silently (state -> IDLE) when the proto forbids JIT, when the
** start is already compiled, or when no trace number is available.
*/
static void trace_start(jit_State *J)
{
  lua_State *L;
  TraceNo traceno;

  if ((J->pt->flags & PROTO_NOJIT)) {  /* JIT disabled for this proto? */
    if (J->parent == 0 && J->exitno == 0 && bc_op(*J->pc) != BC_ITERN) {
      /* Lazy bytecode patching to disable hotcount events. */
      lj_assertJ(bc_op(*J->pc) == BC_FORL || bc_op(*J->pc) == BC_ITERL ||
		 bc_op(*J->pc) == BC_LOOP || bc_op(*J->pc) == BC_FUNCF,
		 "bad hot bytecode %d", bc_op(*J->pc));
      setbc_op(J->pc, (int)bc_op(*J->pc)+(int)BC_ILOOP-(int)BC_LOOP);
      J->pt->flags |= PROTO_ILOOP;
    }
    J->state = LJ_TRACE_IDLE;  /* Silently ignored. */
    return;
  }

  /* Ensuring forward progress for BC_ITERN can trigger hotcount again. */
  if (!J->parent && bc_op(*J->pc) == BC_JLOOP) {  /* Already compiled. */
    J->state = LJ_TRACE_IDLE;  /* Silently ignored. */
    return;
  }

  /* Get a new trace number. */
  traceno = trace_findfree(J);
  if (LJ_UNLIKELY(traceno == 0)) {  /* No free trace? */
    lj_assertJ((J2G(J)->hookmask & HOOK_GC) == 0,
	       "recorder called from GC hook");
    lj_trace_flushall(J->L);  /* Make room for the next attempt. */
    J->state = LJ_TRACE_IDLE;  /* Silently ignored. */
    return;
  }
  /* Anchor the in-progress trace under its number right away. */
  setgcrefp(J->trace[traceno], &J->cur);

  /* Setup enough of the current trace to be able to send the vmevent. */
  memset(&J->cur, 0, sizeof(GCtrace));
  J->cur.traceno = traceno;
  J->cur.nins = J->cur.nk = REF_BASE;  /* Empty IR; consts grow below REF_BASE. */
  J->cur.ir = J->irbuf;
  J->cur.snap = J->snapbuf;
  J->cur.snapmap = J->snapmapbuf;
  J->mergesnap = 0;
  J->needsnap = 0;
  J->bcskip = 0;
  J->guardemit.irt = 0;
  J->postproc = LJ_POST_NONE;
  lj_resetsplit(J);
  J->retryrec = 0;
  J->ktrace = 0;
  setgcref(J->cur.startpt, obj2gco(J->pt));

  L = J->L;
  /* Send the "start" vmevent with trace number, function and position. */
  lj_vmevent_send(L, TRACE,
    setstrV(L, L->top++, lj_str_newlit(L, "start"));
    setintV(L->top++, traceno);
    setfuncV(L, L->top++, J->fn);
    setintV(L->top++, proto_bcpos(J->pt, J->pc));
    if (J->parent) {
      setintV(L->top++, J->parent);
      setintV(L->top++, J->exitno);
    } else {
      BCOp op = bc_op(*J->pc);
      if (op == BC_CALLM || op == BC_CALL || op == BC_ITERC) {
	setintV(L->top++, J->exitno);  /* Parent of stitched trace. */
	setintV(L->top++, -1);
      }
    }
  );
  lj_record_setup(J);
}
| 487 | |
/* Stop tracing: patch the entry bytecode (or parent exit), link the trace
** into the proto/root chains, commit the mcode and save the compact copy.
*/
static void trace_stop(jit_State *J)
{
  BCIns *pc = mref(J->cur.startpc, BCIns);
  BCOp op = bc_op(J->cur.startins);
  GCproto *pt = &gcref(J->cur.startpt)->pt;
  TraceNo traceno = J->cur.traceno;
  GCtrace *T = J->curfinal;  /* Pre-allocated destination for the copy. */
  lua_State *L;

  switch (op) {  /* Dispatch on the opcode that started the trace. */
  case BC_FORL:
    setbc_op(pc+bc_j(J->cur.startins), BC_JFORI);  /* Patch FORI, too. */
    /* fallthrough */
  case BC_LOOP:
  case BC_ITERL:
  case BC_FUNCF:
    /* Patch bytecode of starting instruction in root trace. */
    setbc_op(pc, (int)op+(int)BC_JLOOP-(int)BC_LOOP);
    setbc_d(pc, traceno);
  addroot:
    /* Add to root trace chain in prototype. */
    J->cur.nextroot = pt->trace;
    pt->trace = (TraceNo1)traceno;
    break;
  case BC_ITERN:
  case BC_RET:
  case BC_RET0:
  case BC_RET1:
    /* Replace the whole instruction; operand A carries the slot count. */
    *pc = BCINS_AD(BC_JLOOP, J->cur.snap[0].nslots, traceno);
    goto addroot;
  case BC_JMP:
    /* Patch exit branch in parent to side trace entry. */
    lj_assertJ(J->parent != 0 && J->cur.root != 0, "not a side trace");
    lj_asm_patchexit(J, traceref(J, J->parent), J->exitno, J->cur.mcode);
    /* Avoid compiling a side trace twice (stack resizing uses parent exit). */
    {
      SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno];
      snap->count = SNAPCOUNT_DONE;
      if (J->cur.topslot > snap->topslot) snap->topslot = J->cur.topslot;
    }
    /* Add to side trace chain in root trace. */
    {
      GCtrace *root = traceref(J, J->cur.root);
      root->nchild++;
      J->cur.nextside = root->nextside;
      root->nextside = (TraceNo1)traceno;
    }
    break;
  case BC_CALLM:
  case BC_CALL:
  case BC_ITERC:
    /* Trace stitching: patch link of previous trace. */
    traceref(J, J->exitno)->link = traceno;
    break;
  default:
    lj_assertJ(0, "bad stop bytecode %d", op);
    break;
  }

  /* Commit new mcode only after all patching is done. */
  lj_mcode_commit(J, J->cur.mcode);
  J->postproc = LJ_POST_NONE;
  trace_save(J, T);  /* Compact J->cur into T and publish it. */

  L = J->L;
  lj_vmevent_send(L, TRACE,
    setstrV(L, L->top++, lj_str_newlit(L, "stop"));
    setintV(L->top++, traceno);
    setfuncV(L, L->top++, J->fn);
  );
}
| 560 | |
/* Start a new root trace for down-recursion.
** Returns 1 if recording was restarted, 0 if unsupported (RETM).
*/
static int trace_downrec(jit_State *J)
{
  /* Restart recording at the return instruction. */
  lj_assertJ(J->pt != NULL, "no active prototype");
  lj_assertJ(bc_isret(bc_op(*J->pc)), "not at a return bytecode");
  if (bc_op(*J->pc) == BC_RETM)
    return 0;  /* NYI: down-recursion with RETM. */
  J->parent = 0;  /* This becomes a root trace. */
  J->exitno = 0;
  J->state = LJ_TRACE_RECORD;
  trace_start(J);
  return 1;  /* Tell the state machine to re-enter (see trace_state()). */
}
| 575 | |
/* Abort tracing. Returns nonzero if the state machine should be re-entered
** (MCode-area retry or down-recursion restart).
*/
static int trace_abort(jit_State *J)
{
  lua_State *L = J->L;
  TraceError e = LJ_TRERR_RECERR;
  TraceNo traceno;

  J->postproc = LJ_POST_NONE;
  lj_mcode_abort(J);
  if (J->curfinal) {  /* Discard the pre-allocated compact copy. */
    lj_trace_free(J2G(J), J->curfinal);
    J->curfinal = NULL;
  }
  /* The error code arrives on top of the Lua stack (see lj_trace_err()). */
  if (tvisnumber(L->top-1))
    e = (TraceError)numberVint(L->top-1);
  if (e == LJ_TRERR_MCODELM) {
    L->top--;  /* Remove error object */
    J->state = LJ_TRACE_ASM;
    return 1;  /* Retry ASM with new MCode area. */
  }
  /* Penalize or blacklist starting bytecode instruction. */
  if (J->parent == 0 && !bc_isret(bc_op(J->cur.startins))) {
    if (J->exitno == 0) {
      BCIns *startpc = mref(J->cur.startpc, BCIns);
      if (e == LJ_TRERR_RETRY)
	hotcount_set(J2GG(J), startpc+1, 1);  /* Immediate retry. */
      else
	penalty_pc(J, &gcref(J->cur.startpt)->pt, startpc, e);
    } else {
      traceref(J, J->exitno)->link = J->exitno;  /* Self-link is blacklisted. */
    }
  }

  /* Is there anything to abort? */
  traceno = J->cur.traceno;
  if (traceno) {
    ptrdiff_t errobj = savestack(L, L->top-1);  /* Stack may be resized. */
    J->cur.link = 0;
    J->cur.linktype = LJ_TRLINK_NONE;
    lj_vmevent_send(L, TRACE,
      TValue *frame;
      const BCIns *pc;
      GCfunc *fn;
      setstrV(L, L->top++, lj_str_newlit(L, "abort"));
      setintV(L->top++, traceno);
      /* Find original Lua function call to generate a better error message. */
      frame = J->L->base-1;
      pc = J->pc;
      while (!isluafunc(frame_func(frame))) {
	pc = (frame_iscont(frame) ? frame_contpc(frame) : frame_pc(frame)) - 1;
	frame = frame_prev(frame);
      }
      fn = frame_func(frame);
      setfuncV(L, L->top++, fn);
      setintV(L->top++, proto_bcpos(funcproto(fn), pc));
      copyTV(L, L->top++, restorestack(L, errobj));
      copyTV(L, L->top++, &J->errinfo);
    );
    /* Drop aborted trace after the vmevent (which may still access it). */
    setgcrefnull(J->trace[traceno]);
    if (traceno < J->freetrace)
      J->freetrace = traceno;  /* Lower the free-slot search hint. */
    J->cur.traceno = 0;
  }
  L->top--;  /* Remove error object */
  if (e == LJ_TRERR_DOWNREC)
    return trace_downrec(J);  /* Retry as a down-recursion root trace. */
  else if (e == LJ_TRERR_MCODEAL)
    lj_trace_flushall(L);  /* MCode allocation failed: flush everything. */
  return 0;
}
| 647 | |
| 648 /* Perform pending re-patch of a bytecode instruction. */ | |
| 649 static LJ_AINLINE void trace_pendpatch(jit_State *J, int force) | |
| 650 { | |
| 651 if (LJ_UNLIKELY(J->patchpc)) { | |
| 652 if (force || J->bcskip == 0) { | |
| 653 *J->patchpc = J->patchins; | |
| 654 J->patchpc = NULL; | |
| 655 } else { | |
| 656 J->bcskip = 0; | |
| 657 } | |
| 658 } | |
| 659 } | |
| 660 | |
/* State machine for the trace compiler. Protected callback.
** Drives START -> RECORD -> END -> ASM, with ERR handling and retries.
*/
static TValue *trace_state(lua_State *L, lua_CFunction dummy, void *ud)
{
  jit_State *J = (jit_State *)ud;
  UNUSED(dummy);
  do {
  retry:
    switch (J->state) {
    case LJ_TRACE_START:
      J->state = LJ_TRACE_RECORD;  /* trace_start() may change state. */
      trace_start(J);
      lj_dispatch_update(J2G(J));
      if (J->state != LJ_TRACE_RECORD_1ST)
	break;
      /* fallthrough */

    case LJ_TRACE_RECORD_1ST:
      J->state = LJ_TRACE_RECORD;  /* One-shot state for the first record. */
      /* fallthrough */
    case LJ_TRACE_RECORD:
      trace_pendpatch(J, 0);
      setvmstate(J2G(J), RECORD);
      lj_vmevent_send_(L, RECORD,
	/* Save/restore state for trace recorder. */
	TValue savetv = J2G(J)->tmptv;
	TValue savetv2 = J2G(J)->tmptv2;
	TraceNo parent = J->parent;
	ExitNo exitno = J->exitno;
	setintV(L->top++, J->cur.traceno);
	setfuncV(L, L->top++, J->fn);
	setintV(L->top++, J->pt ? (int32_t)proto_bcpos(J->pt, J->pc) : -1);
	setintV(L->top++, J->framedepth);
      ,
	J2G(J)->tmptv = savetv;
	J2G(J)->tmptv2 = savetv2;
	J->parent = parent;
	J->exitno = exitno;
      );
      lj_record_ins(J);  /* Record one bytecode instruction. */
      break;

    case LJ_TRACE_END:
      trace_pendpatch(J, 1);
      J->loopref = 0;
      /* Looping trace with no pending frames: try loop optimization. */
      if ((J->flags & JIT_F_OPT_LOOP) &&
	  J->cur.link == J->cur.traceno && J->framedepth + J->retdepth == 0) {
	setvmstate(J2G(J), OPT);
	lj_opt_dce(J);
	if (lj_opt_loop(J)) {  /* Loop optimization failed? */
	  J->cur.link = 0;
	  J->cur.linktype = LJ_TRLINK_NONE;
	  J->loopref = J->cur.nins;
	  J->state = LJ_TRACE_RECORD;  /* Try to continue recording. */
	  break;
	}
	J->loopref = J->chain[IR_LOOP];  /* Needed by assembler. */
      }
      lj_opt_split(J);
      lj_opt_sink(J);
      if (!J->loopref) J->cur.snap[J->cur.nsnap-1].count = SNAPCOUNT_DONE;
      J->state = LJ_TRACE_ASM;
      break;

    case LJ_TRACE_ASM:
      setvmstate(J2G(J), ASM);
      lj_asm_trace(J, &J->cur);
      trace_stop(J);
      setvmstate(J2G(J), INTERP);
      J->state = LJ_TRACE_IDLE;
      lj_dispatch_update(J2G(J));
      return NULL;

    default:  /* Trace aborted asynchronously. */
      setintV(L->top++, (int32_t)LJ_TRERR_RECERR);  /* Synthesize error code. */
      /* fallthrough */
    case LJ_TRACE_ERR:
      trace_pendpatch(J, 1);
      if (trace_abort(J))
	goto retry;  /* Abort requested a re-entry (retry or down-rec). */
      setvmstate(J2G(J), INTERP);
      J->state = LJ_TRACE_IDLE;
      lj_dispatch_update(J2G(J));
      return NULL;
    }
  } while (J->state > LJ_TRACE_RECORD);
  return NULL;
}
| 748 | |
| 749 /* -- Event handling ------------------------------------------------------ */ | |
| 750 | |
/* A bytecode instruction is about to be executed. Record it. */
void lj_trace_ins(jit_State *J, const BCIns *pc)
{
  /* Note: J->L must already be set. pc is the true bytecode PC here. */
  J->pc = pc;
  J->fn = curr_func(J->L);
  J->pt = isluafunc(J->fn) ? funcproto(J->fn) : NULL;  /* NULL for C funcs. */
  /* Run the compiler state machine protected; on a throw, switch to the
  ** error state and re-enter until it settles without throwing.
  */
  while (lj_vm_cpcall(J->L, NULL, (void *)J, trace_state) != 0)
    J->state = LJ_TRACE_ERR;
}
| 761 | |
/* A hotcount triggered. Start recording a root trace. */
void LJ_FASTCALL lj_trace_hot(jit_State *J, const BCIns *pc)
{
  /* Note: pc is the interpreter bytecode PC here. It's offset by 1. */
  ERRNO_SAVE
  /* Reset hotcount. */
  hotcount_set(J2GG(J), pc, J->param[JIT_P_hotloop]*HOTCOUNT_LOOP);
  /* Only start a new trace if not recording or inside __gc call or vmevent. */
  if (J->state == LJ_TRACE_IDLE &&
      !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) {
    J->parent = 0;  /* Root trace. */
    J->exitno = 0;
    J->state = LJ_TRACE_START;
    lj_trace_ins(J, pc-1);  /* Undo the +1 PC offset (see note above). */
  }
  ERRNO_RESTORE
}
| 779 | |
/* Check for a hot side exit. If yes, start recording a side trace. */
static void trace_hotside(jit_State *J, const BCIns *pc)
{
  SnapShot *snap = &traceref(J, J->parent)->snap[J->exitno];
  if (!(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT)) &&
      isluafunc(curr_func(J->L)) &&
      snap->count != SNAPCOUNT_DONE &&  /* Side trace not already compiled. */
      ++snap->count >= J->param[JIT_P_hotexit]) {
    lj_assertJ(J->state == LJ_TRACE_IDLE, "hot side exit while recording");
    /* J->parent is non-zero for a side trace. */
    J->state = LJ_TRACE_START;
    lj_trace_ins(J, pc);
  }
}
| 794 | |
| 795 /* Stitch a new trace to the previous trace. */ | |
| 796 void LJ_FASTCALL lj_trace_stitch(jit_State *J, const BCIns *pc) | |
| 797 { | |
| 798 /* Only start a new trace if not recording or inside __gc call or vmevent. */ | |
| 799 if (J->state == LJ_TRACE_IDLE && | |
| 800 !(J2G(J)->hookmask & (HOOK_GC|HOOK_VMEVENT))) { | |
| 801 J->parent = 0; /* Have to treat it like a root trace. */ | |
| 802 /* J->exitno is set to the invoking trace. */ | |
| 803 J->state = LJ_TRACE_START; | |
| 804 lj_trace_ins(J, pc); | |
| 805 } | |
| 806 } | |
| 807 | |
| 808 | |
/* Tiny struct to pass data to protected call. */
typedef struct ExitDataCP {
  jit_State *J;
  void *exptr;		/* Pointer to exit state. */
  const BCIns *pc;	/* Restart interpreter at this PC. [out] Set by
			** trace_exit_cp() from the restored snapshot. */
} ExitDataCP;
| 815 | |
/* Need to protect lj_snap_restore because it may throw. */
static TValue *trace_exit_cp(lua_State *L, lua_CFunction dummy, void *ud)
{
  ExitDataCP *exd = (ExitDataCP *)ud;
  /* Always catch error here and don't call error function. */
  cframe_errfunc(L->cframe) = 0;
  /* NOTE(review): magic nres value mirrors the cpcall frame convention --
  ** confirm against lj_frame.h before changing.
  */
  cframe_nres(L->cframe) = -2*LUAI_MAXSTACK*(int)sizeof(TValue);
  exd->pc = lj_snap_restore(exd->J, exd->exptr);  /* May throw. */
  UNUSED(dummy);
  return NULL;
}
| 827 | |
#ifndef LUAJIT_DISABLE_VMEVENT
/* Push all registers from exit state (for the TEXIT vmevent). */
static void trace_exit_regs(lua_State *L, ExitState *ex)
{
  int32_t i;
  setintV(L->top++, RID_NUM_GPR);  /* Register counts first, then values. */
  setintV(L->top++, RID_NUM_FPR);
  for (i = 0; i < RID_NUM_GPR; i++) {
    if (sizeof(ex->gpr[i]) == sizeof(int32_t))
      setintV(L->top++, (int32_t)ex->gpr[i]);
    else
      setnumV(L->top++, (lua_Number)ex->gpr[i]);  /* 64 bit GPRs as numbers. */
  }
#if !LJ_SOFTFP
  for (i = 0; i < RID_NUM_FPR; i++) {
    setnumV(L->top, ex->fpr[i]);
    if (LJ_UNLIKELY(tvisnan(L->top)))
      setnanV(L->top);  /* Canonicalize NaNs before exposing them to Lua. */
    L->top++;
  }
#endif
}
#endif
| 851 | |
#if defined(EXITSTATE_PCREG) || (LJ_UNWIND_JIT && !EXITTRACE_VMSTATE)
/* Determine trace number from pc of exit instruction. */
static TraceNo trace_exit_find(jit_State *J, MCode *pc)
{
  TraceNo tno;
  /* Linear scan for the trace whose mcode range contains pc. */
  for (tno = 1; tno < J->sizetrace; tno++) {
    GCtrace *T = traceref(J, tno);
    if (T == NULL)
      continue;
    if (pc >= T->mcode && pc < (MCode *)((char *)T->mcode + T->szmcode))
      return tno;
  }
  lj_assertJ(0, "bad exit pc");
  return 0;
}
#endif
| 866 | |
/* A trace exited. Restore interpreter state.
** Called from the exit stub with the saved register state in exptr.
** Restores the interpreter stack from the exit snapshot (under a protected
** call, since restoration may throw), optionally fires the TEXIT VM event,
** drives the GC or side-trace recording as appropriate, and returns a value
** that tells the VM how to resume: a negated error code on failure, -17 to
** force dispatch to the original instruction, otherwise MULTRES or 0.
*/
int LJ_FASTCALL lj_trace_exit(jit_State *J, void *exptr)
{
  ERRNO_SAVE  /* Preserve errno across exit handling (restored below). */
  lua_State *L = J->L;
  ExitState *ex = (ExitState *)exptr;
  ExitDataCP exd;
  int errcode, exitcode = J->exitcode;
  TValue exiterr;
  const BCIns *pc, *retpc;
  void *cf;
  GCtrace *T;

  setnilV(&exiterr);
  if (exitcode) {  /* Trace unwound with error code. */
    J->exitcode = 0;
    /* Save the error object before snapshot restore rearranges the stack. */
    copyTV(L, &exiterr, L->top-1);
  }

#ifdef EXITSTATE_PCREG
  /* No exit number in the exit state: recover the trace from the saved pc. */
  J->parent = trace_exit_find(J, (MCode *)(intptr_t)ex->gpr[EXITSTATE_PCREG]);
#endif
  T = traceref(J, J->parent); UNUSED(T);
#ifdef EXITSTATE_CHECKEXIT
  if (J->exitno == T->nsnap) {  /* Treat stack check like a parent exit. */
    lj_assertJ(T->root != 0, "stack check in root trace");
    /* Side traces stash their parent trace/exit in the REF_BASE IR ops. */
    J->exitno = T->ir[REF_BASE].op2;
    J->parent = T->ir[REF_BASE].op1;
    T = traceref(J, J->parent);
  }
#endif
  lj_assertJ(T != NULL && J->exitno < T->nsnap, "bad trace or exit number");
  exd.J = J;
  exd.exptr = exptr;
  /* Restore interpreter state from the snapshot under a protected call. */
  errcode = lj_vm_cpcall(L, NULL, &exd, trace_exit_cp);
  if (errcode)
    return -errcode;  /* Return negated error code. */

  if (exitcode) copyTV(L, L->top++, &exiterr);  /* Anchor the error object. */

  /* Fire the TEXIT VM event (parent, exitno, all registers), unless the
  ** profiler hook is active. lj_vmevent_send is a macro: the statements
  ** below are its payload, executed only when a TEXIT handler is set.
  */
  if (!(LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE)))
    lj_vmevent_send(L, TEXIT,
      lj_state_checkstack(L, 4+RID_NUM_GPR+RID_NUM_FPR+LUA_MINSTACK);
      setintV(L->top++, J->parent);
      setintV(L->top++, J->exitno);
      trace_exit_regs(L, ex);
    );

  pc = exd.pc;
  cf = cframe_raw(L->cframe);
  setcframe_pc(cf, pc);  /* Interpreter resumes at the snapshot's pc. */
  if (exitcode) {
    return -exitcode;
  } else if (LJ_HASPROFILE && (G(L)->hookmask & HOOK_PROFILE)) {
    /* Just exit to interpreter. */
  } else if (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize) {
    if (!(G(L)->hookmask & HOOK_GC))
      lj_gc_step(L);  /* Exited because of GC: drive GC forward. */
  } else {
    /* Hot exit: maybe start recording a side trace at this pc. */
    trace_hotside(J, pc);
  }
  /* Return MULTRES or 0 or -17. */
  ERRNO_RESTORE
  switch (bc_op(*pc)) {
  case BC_CALLM: case BC_CALLMT:
    /* Variable-result call: MULTRES is the number of pending arguments. */
    return (int)((BCReg)(L->top - L->base) - bc_a(*pc) - bc_c(*pc) - LJ_FR2);
  case BC_RETM:
    return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc) - bc_d(*pc));
  case BC_TSETM:
    return (int)((BCReg)(L->top - L->base) + 1 - bc_a(*pc));
  case BC_JLOOP:
    retpc = &traceref(J, bc_d(*pc))->startins;
    if (bc_isret(bc_op(*retpc)) || bc_op(*retpc) == BC_ITERN) {
      /* Dispatch to original ins to ensure forward progress. */
      if (J->state != LJ_TRACE_RECORD) return -17;
      /* Unpatch bytecode when recording. */
      J->patchins = *pc;
      J->patchpc = (BCIns *)pc;
      *J->patchpc = *retpc;
      J->bcskip = 1;
    }
    return 0;
  default:
    if (bc_op(*pc) >= BC_FUNCF)
      return (int)((BCReg)(L->top - L->base) + 1);
    return 0;
  }
}
| 955 | |
#if LJ_UNWIND_JIT
/* Given an mcode address determine trace exit address for unwinding.
** Maps an address inside a trace's machine code to the corresponding exit
** number (stored through ep) and returns the address of that exit's stub,
** so external unwinders can treat the trace like a normal trace exit.
** Returns 0 if the address cannot be matched to any trace (fatal).
*/
uintptr_t LJ_FASTCALL lj_trace_unwind(jit_State *J, uintptr_t addr, ExitNo *ep)
{
#if EXITTRACE_VMSTATE
  /* The VM state holds the currently executing trace number. */
  TraceNo traceno = J2G(J)->vmstate;
#else
  /* No trace number available: search all traces by mcode range. */
  TraceNo traceno = trace_exit_find(J, (MCode *)addr);
#endif
  GCtrace *T = traceref(J, traceno);
  if (T
#if EXITTRACE_VMSTATE
      /* vmstate may be stale; verify addr really lies inside this trace. */
      && addr >= (uintptr_t)T->mcode && addr < (uintptr_t)T->mcode + T->szmcode
#endif
      ) {
    SnapShot *snap = T->snap;
    SnapNo lo = 0, exitno = T->nsnap;
    uintptr_t ofs = (uintptr_t)((MCode *)addr - T->mcode);  /* MCode units! */
    /* Rightmost binary search for mcode offset to determine exit number:
    ** find the last snapshot whose mcofs is <= ofs.
    */
    do {
      SnapNo mid = (lo+exitno) >> 1;
      if (ofs < snap[mid].mcofs) exitno = mid; else lo = mid + 1;
    } while (lo < exitno);
    exitno--;
    *ep = exitno;
#ifdef EXITSTUBS_PER_GROUP
    return (uintptr_t)exitstub_addr(J, exitno);
#else
    return (uintptr_t)exitstub_trace_addr(T, exitno);
#endif
  }
  /* Cannot correlate addr with trace/exit. This will be fatal. */
  lj_assertJ(0, "bad exit pc");
  return 0;
}
#endif
| 992 | |
| 993 #endif |