Mercurial comparison: third_party/luajit/src/lj_asm_arm64.h @ 178:94705b5986b3

[ThirdParty] Added WRK and luajit for load testing.

| author | MrJuneJune <me@mrjunejune.com> |
|---|---|
| date | Thu, 22 Jan 2026 20:10:30 -0800 |
| parents | |
| children | |
Comparison of revisions 177:24fe8ff94056 and 178:94705b5986b3. The file is new in this changeset, so the listing below shows its full contents at revision 178.
```c
/*
** ARM64 IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2023 Mike Pall. See Copyright Notice in luajit.h
**
** Contributed by Djordje Kovacevic and Stefan Pejic from RT-RK.com.
** Sponsored by Cisco Systems, Inc.
*/

/* -- Register allocator extensions --------------------------------------- */

/* Allocate a register with a hint. */
static Reg ra_hintalloc(ASMState *as, IRRef ref, Reg hint, RegSet allow)
{
  Reg r = IR(ref)->r;
  if (ra_noreg(r)) {
    if (!ra_hashint(r) && !iscrossref(as, ref))
      ra_sethint(IR(ref)->r, hint);  /* Propagate register hint. */
    r = ra_allocref(as, ref, allow);
  }
  ra_noweak(as, r);
  return r;
}

/* Allocate two source registers for three-operand instructions. */
static Reg ra_alloc2(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
  Reg left = irl->r, right = irr->r;
  if (ra_hasreg(left)) {
    ra_noweak(as, left);
    if (ra_noreg(right))
      right = ra_allocref(as, ir->op2, rset_exclude(allow, left));
    else
      ra_noweak(as, right);
  } else if (ra_hasreg(right)) {
    ra_noweak(as, right);
    left = ra_allocref(as, ir->op1, rset_exclude(allow, right));
  } else if (ra_hashint(right)) {
    right = ra_allocref(as, ir->op2, allow);
    left = ra_alloc1(as, ir->op1, rset_exclude(allow, right));
  } else {
    left = ra_allocref(as, ir->op1, allow);
    right = ra_alloc1(as, ir->op2, rset_exclude(allow, left));
  }
  return left | (right << 8);
}
```
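ra_alloc2 returns both registers packed into a single value: the left register in the low byte, the right register in the second byte. Callers unpack it with `right = (left >> 8); left &= 255;`, as asm_fusemadd and asm_fparith do further down. A minimal standalone sketch of this convention, with Reg reduced to a plain integer id:

```c
#include <assert.h>
#include <stdint.h>

typedef uint32_t Reg;  /* Stand-in for the allocator's register id type. */

/* Pack two register ids into one value, mirroring ra_alloc2's return. */
static Reg pack2(Reg left, Reg right) { return left | (right << 8); }

int main(void)
{
  Reg pair = pack2(3, 17);
  Reg right = (pair >> 8), left = pair & 255;  /* Unpack as the callers do. */
  assert(left == 3 && right == 17);
  return 0;
}
```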
```c
/* -- Guard handling ------------------------------------------------------ */

/* Setup all needed exit stubs. */
static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
{
  ExitNo i;
  MCode *mxp = as->mctop;
  if (mxp - (nexits + 3 + MCLIM_REDZONE) < as->mclim)
    asm_mclimit(as);
  /* 1: str lr,[sp]; bl ->vm_exit_handler; movz w0,traceno; bl <1; bl <1; ... */
  for (i = nexits-1; (int32_t)i >= 0; i--)
    *--mxp = A64I_LE(A64I_BL | A64F_S26(-3-i));
  *--mxp = A64I_LE(A64I_MOVZw | A64F_U16(as->T->traceno));
  mxp--;
  *mxp = A64I_LE(A64I_BL | A64F_S26(((MCode *)(void *)lj_vm_exit_handler-mxp)));
  *--mxp = A64I_LE(A64I_STRx | A64F_D(RID_LR) | A64F_N(RID_SP));
  as->mctop = mxp;
}
```
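Note the emission direction: like the other LuaJIT backends, this assembler generates machine code backwards. as->mctop points just past the end of the buffer and every `*--mxp = ...` prepends one 32-bit instruction, so the loop above writes the per-exit `bl` stubs in reverse while they end up in forward order. A minimal sketch of the idiom, with plain integers standing in for encoded instructions:

```c
#include <stdio.h>
#include <stdint.h>

typedef uint32_t MCode;

int main(void)
{
  MCode buf[8], *top = buf + 8, *p = top;  /* top points past the end. */
  *--p = 0x13; *--p = 0x12; *--p = 0x11;   /* Emit three words in reverse. */
  for (; p < top; p++)
    printf("%#x\n", *p);                   /* Prints 0x11 0x12 0x13. */
  return 0;
}
```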
```c
static MCode *asm_exitstub_addr(ASMState *as, ExitNo exitno)
{
  /* Keep this in-sync with exitstub_trace_addr(). */
  return as->mctop + exitno + 3;
}

/* Emit conditional branch to exit for guard. */
static void asm_guardcc(ASMState *as, A64CC cc)
{
  MCode *target = asm_exitstub_addr(as, as->snapno);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->loopinv = 1;
    *p = A64I_B | A64F_S26(target-p);
    emit_cond_branch(as, cc^1, p-1);
    return;
  }
  emit_cond_branch(as, cc, target);
}

/* Emit test and branch instruction to exit for guard. */
static void asm_guardtnb(ASMState *as, A64Ins ai, Reg r, uint32_t bit)
{
  MCode *target = asm_exitstub_addr(as, as->snapno);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->loopinv = 1;
    *p = A64I_B | A64F_S26(target-p);
    emit_tnb(as, ai^0x01000000u, r, bit, p-1);
    return;
  }
  emit_tnb(as, ai, r, bit, target);
}

/* Emit compare and branch instruction to exit for guard. */
static void asm_guardcnb(ASMState *as, A64Ins ai, Reg r)
{
  MCode *target = asm_exitstub_addr(as, as->snapno);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->loopinv = 1;
    *p = A64I_B | A64F_S26(target-p);
    emit_cnb(as, ai^0x01000000u, r, p-1);
    return;
  }
  emit_cnb(as, ai, r, target);
}

/* -- Operand fusion ------------------------------------------------------ */

/* Limit linear search to this distance. Avoids O(n^2) behavior. */
#define CONFLICT_SEARCH_LIM 31

static int asm_isk32(ASMState *as, IRRef ref, int32_t *k)
{
  if (irref_isk(ref)) {
    IRIns *ir = IR(ref);
    if (ir->o == IR_KNULL || !irt_is64(ir->t)) {
      *k = ir->i;
      return 1;
    } else if (checki32((int64_t)ir_k64(ir)->u64)) {
      *k = (int32_t)ir_k64(ir)->u64;
      return 1;
    }
  }
  return 0;
}
```
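asm_isk32 folds a constant into a 32-bit operand only when that is lossless: 32-bit IR constants are taken directly, and 64-bit constants are accepted only if they survive a round trip through int32_t. A minimal sketch of that range check (the real checki32 macro lives elsewhere in the LuaJIT sources; this version is written out by hand):

```c
#include <assert.h>
#include <stdint.h>

/* Does a 64-bit constant fit a sign-extended 32-bit immediate? */
static int checki32(int64_t k) { return k == (int64_t)(int32_t)k; }

int main(void)
{
  assert(checki32(-1));              /* Sign-extends exactly. */
  assert(checki32(0x7fffffff));      /* INT32_MAX fits. */
  assert(!checki32(0x80000000LL));   /* 2^31 would flip the sign. */
  assert(!checki32(0x100000000LL));  /* Needs more than 32 bits. */
  return 0;
}
```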
```c
/* Check if there's no conflicting instruction between curins and ref. */
static int noconflict(ASMState *as, IRRef ref, IROp conflict)
{
  IRIns *ir = as->ir;
  IRRef i = as->curins;
  if (i > ref + CONFLICT_SEARCH_LIM)
    return 0;  /* Give up, ref is too far away. */
  while (--i > ref)
    if (ir[i].o == conflict)
      return 0;  /* Conflict found. */
  return 1;  /* Ok, no conflict. */
}

/* Fuse the array base of colocated arrays. */
static int32_t asm_fuseabase(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (ir->o == IR_TNEW && ir->op1 <= LJ_MAX_COLOSIZE &&
      !neverfuse(as) && noconflict(as, ref, IR_NEWREF))
    return (int32_t)sizeof(GCtab);
  return 0;
}

#define FUSE_REG 0x40000000

/* Fuse array/hash/upvalue reference into register+offset operand. */
static Reg asm_fuseahuref(ASMState *as, IRRef ref, int32_t *ofsp, RegSet allow,
                          A64Ins ins)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r)) {
    if (ir->o == IR_AREF) {
      if (mayfuse(as, ref)) {
        if (irref_isk(ir->op2)) {
          IRRef tab = IR(ir->op1)->op1;
          int32_t ofs = asm_fuseabase(as, tab);
          IRRef refa = ofs ? tab : ir->op1;
          ofs += 8*IR(ir->op2)->i;
          if (emit_checkofs(ins, ofs)) {
            *ofsp = ofs;
            return ra_alloc1(as, refa, allow);
          }
        } else {
          Reg base = ra_alloc1(as, ir->op1, allow);
          *ofsp = FUSE_REG|ra_alloc1(as, ir->op2, rset_exclude(allow, base));
          return base;
        }
      }
    } else if (ir->o == IR_HREFK) {
      if (mayfuse(as, ref)) {
        int32_t ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
        if (emit_checkofs(ins, ofs)) {
          *ofsp = ofs;
          return ra_alloc1(as, ir->op1, allow);
        }
      }
    } else if (ir->o == IR_UREFC) {
      if (irref_isk(ir->op1)) {
        GCfunc *fn = ir_kfunc(IR(ir->op1));
        GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv;
        int64_t ofs = glofs(as, &uv->tv);
        if (emit_checkofs(ins, ofs)) {
          *ofsp = (int32_t)ofs;
          return RID_GL;
        }
      }
    } else if (ir->o == IR_TMPREF) {
      *ofsp = (int32_t)glofs(as, &J2G(as->J)->tmptv);
      return RID_GL;
    }
  }
  *ofsp = 0;
  return ra_alloc1(as, ref, allow);
}
```
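asm_fuseahuref overloads the returned offset: `*ofsp` is either a plain byte offset or FUSE_REG|reg, meaning "index by this register instead of a displacement". Consumers like asm_ahuvload test `ofs & FUSE_REG` and mask with 31 to recover the register. A minimal sketch of the tagged encoding; it works because ARM64 register ids fit in 5 bits, leaving bit 30 free as a tag:

```c
#include <assert.h>
#include <stdint.h>

#define FUSE_REG 0x40000000

int main(void)
{
  int32_t plain = 16;            /* Byte offset 16. */
  int32_t fused = FUSE_REG | 7;  /* "Index by register 7". */
  assert(!(plain & FUSE_REG));   /* Plain offset: use as-is. */
  assert((fused & FUSE_REG) && (fused & 31) == 7);  /* Fused: extract reg. */
  return 0;
}
```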
```c
/* Fuse m operand into arithmetic/logic instructions. */
static uint32_t asm_fuseopm(ASMState *as, A64Ins ai, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_hasreg(ir->r)) {
    ra_noweak(as, ir->r);
    return A64F_M(ir->r);
  } else if (irref_isk(ref)) {
    uint32_t m;
    int64_t k = get_k64val(as, ref);
    if ((ai & 0x1f000000) == 0x0a000000)
      m = emit_isk13(k, irt_is64(ir->t));
    else
      m = emit_isk12(k);
    if (m)
      return m;
  } else if (mayfuse(as, ref)) {
    if ((ir->o >= IR_BSHL && ir->o <= IR_BSAR && irref_isk(ir->op2)) ||
        (ir->o == IR_ADD && ir->op1 == ir->op2)) {
      A64Shift sh = ir->o == IR_BSHR ? A64SH_LSR :
                    ir->o == IR_BSAR ? A64SH_ASR : A64SH_LSL;
      int shift = ir->o == IR_ADD ? 1 :
                  (IR(ir->op2)->i & (irt_is64(ir->t) ? 63 : 31));
      IRIns *irl = IR(ir->op1);
      if (sh == A64SH_LSL &&
          irl->o == IR_CONV &&
          irl->op2 == ((IRT_I64<<IRCONV_DSH)|IRT_INT|IRCONV_SEXT) &&
          shift <= 4 &&
          canfuse(as, irl)) {
        Reg m = ra_alloc1(as, irl->op1, allow);
        return A64F_M(m) | A64F_EXSH(A64EX_SXTW, shift);
      } else {
        Reg m = ra_alloc1(as, ir->op1, allow);
        return A64F_M(m) | A64F_SH(sh, shift);
      }
    } else if (ir->o == IR_CONV &&
               ir->op2 == ((IRT_I64<<IRCONV_DSH)|IRT_INT|IRCONV_SEXT)) {
      Reg m = ra_alloc1(as, ir->op1, allow);
      return A64F_M(m) | A64F_EX(A64EX_SXTW);
    }
  }
  return A64F_M(ra_allocref(as, ref, allow));
}
```
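The `(ai & 0x1f000000) == 0x0a000000` test picks out the logical-op encoding class, which takes bitmask immediates (emit_isk13), while add/sub-class instructions take a 12-bit unsigned immediate, optionally shifted left by 12 (emit_isk12). A minimal sketch of the add/sub range check only; this is a yes/no predicate, whereas the real emit_isk12 also packs the encoding bits:

```c
#include <assert.h>
#include <stdint.h>

/* Can k be an ARM64 add/sub immediate: 12 bits, optionally LSL #12? */
static int fits_k12(uint64_t k)
{
  return (k & ~0xfffull) == 0 || (k & ~0xfff000ull) == 0;
}

int main(void)
{
  assert(fits_k12(0) && fits_k12(4095));         /* Plain 12-bit range. */
  assert(fits_k12(4096) && fits_k12(0xabc000));  /* The LSL #12 variants. */
  assert(!fits_k12(0x1001));  /* Straddles both halves: not encodable. */
  return 0;
}
```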
```c
/* Fuse XLOAD/XSTORE reference into load/store operand. */
static void asm_fusexref(ASMState *as, A64Ins ai, Reg rd, IRRef ref,
                         RegSet allow)
{
  IRIns *ir = IR(ref);
  Reg base;
  int32_t ofs = 0;
  if (ra_noreg(ir->r) && canfuse(as, ir)) {
    if (ir->o == IR_ADD) {
      if (asm_isk32(as, ir->op2, &ofs) && emit_checkofs(ai, ofs)) {
        ref = ir->op1;
      } else {
        Reg rn, rm;
        IRRef lref = ir->op1, rref = ir->op2;
        IRIns *irl = IR(lref);
        if (mayfuse(as, irl->op1)) {
          unsigned int shift = 4;
          if (irl->o == IR_BSHL && irref_isk(irl->op2)) {
            shift = (IR(irl->op2)->i & 63);
          } else if (irl->o == IR_ADD && irl->op1 == irl->op2) {
            shift = 1;
          }
          if ((ai >> 30) == shift) {
            lref = irl->op1;
            irl = IR(lref);
            ai |= A64I_LS_SH;
          }
        }
        if (irl->o == IR_CONV &&
            irl->op2 == ((IRT_I64<<IRCONV_DSH)|IRT_INT|IRCONV_SEXT) &&
            canfuse(as, irl)) {
          lref = irl->op1;
          ai |= A64I_LS_SXTWx;
        } else {
          ai |= A64I_LS_LSLx;
        }
        rm = ra_alloc1(as, lref, allow);
        rn = ra_alloc1(as, rref, rset_exclude(allow, rm));
        emit_dnm(as, (ai^A64I_LS_R), (rd & 31), rn, rm);
        return;
      }
    } else if (ir->o == IR_STRREF) {
      if (asm_isk32(as, ir->op2, &ofs)) {
        ref = ir->op1;
      } else if (asm_isk32(as, ir->op1, &ofs)) {
        ref = ir->op2;
      } else {
        Reg refk = irref_isk(ir->op1) ? ir->op1 : ir->op2;
        Reg refv = irref_isk(ir->op1) ? ir->op2 : ir->op1;
        Reg rn = ra_alloc1(as, refv, allow);
        IRIns *irr = IR(refk);
        uint32_t m;
        if (irr+1 == ir && !ra_used(irr) &&
            irr->o == IR_ADD && irref_isk(irr->op2)) {
          ofs = sizeof(GCstr) + IR(irr->op2)->i;
          if (emit_checkofs(ai, ofs)) {
            Reg rm = ra_alloc1(as, irr->op1, rset_exclude(allow, rn));
            m = A64F_M(rm) | A64F_EX(A64EX_SXTW);
            goto skipopm;
          }
        }
        m = asm_fuseopm(as, 0, refk, rset_exclude(allow, rn));
        ofs = sizeof(GCstr);
      skipopm:
        emit_lso(as, ai, rd, rd, ofs);
        emit_dn(as, A64I_ADDx^m, rd, rn);
        return;
      }
      ofs += sizeof(GCstr);
      if (!emit_checkofs(ai, ofs)) {
        Reg rn = ra_alloc1(as, ref, allow);
        Reg rm = ra_allock(as, ofs, rset_exclude(allow, rn));
        emit_dnm(as, (ai^A64I_LS_R)|A64I_LS_UXTWx, rd, rn, rm);
        return;
      }
    }
  }
  base = ra_alloc1(as, ref, allow);
  emit_lso(as, ai, (rd & 31), base, ofs);
}

/* Fuse FP multiply-add/sub. */
static int asm_fusemadd(ASMState *as, IRIns *ir, A64Ins ai, A64Ins air)
{
  IRRef lref = ir->op1, rref = ir->op2;
  IRIns *irm;
  if ((as->flags & JIT_F_OPT_FMA) &&
      lref != rref &&
      ((mayfuse(as, lref) && (irm = IR(lref), irm->o == IR_MUL) &&
        ra_noreg(irm->r)) ||
       (mayfuse(as, rref) && (irm = IR(rref), irm->o == IR_MUL) &&
        (rref = lref, ai = air, ra_noreg(irm->r))))) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    Reg add = ra_hintalloc(as, rref, dest, RSET_FPR);
    Reg left = ra_alloc2(as, irm,
                         rset_exclude(rset_exclude(RSET_FPR, dest), add));
    Reg right = (left >> 8); left &= 255;
    emit_dnma(as, ai, (dest & 31), (left & 31), (right & 31), (add & 31));
    return 1;
  }
  return 0;
}
```
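Fusing FADD(FMUL(a,b), c) into one fmadd is not result-neutral: the fused form rounds once where the separate instructions round twice, which is why the fusion is gated behind the JIT_F_OPT_FMA flag rather than done unconditionally. A minimal sketch of a case where the two differ, using C99 fma() as a stand-in for the fused instruction (link with -lm):

```c
#include <assert.h>
#include <float.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
  double a = 1.0 + DBL_EPSILON, b = 1.0 - DBL_EPSILON, c = -1.0;
  double twice = a * b + c;     /* a*b rounds to 1.0, so this is 0.0. */
  double fused = fma(a, b, c);  /* Single rounding: exactly -DBL_EPSILON^2. */
  printf("%g vs %g\n", twice, fused);
  assert(twice == 0.0 && fused != 0.0);
  return 0;
}
```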
```c
/* Fuse BAND + BSHL/BSHR into UBFM. */
static int asm_fuseandshift(ASMState *as, IRIns *ir)
{
  IRIns *irl = IR(ir->op1);
  lj_assertA(ir->o == IR_BAND, "bad usage");
  if (canfuse(as, irl) && irref_isk(ir->op2)) {
    uint64_t mask = get_k64val(as, ir->op2);
    if (irref_isk(irl->op2) && (irl->o == IR_BSHR || irl->o == IR_BSHL)) {
      int32_t shmask = irt_is64(irl->t) ? 63 : 31;
      int32_t shift = (IR(irl->op2)->i & shmask);
      int32_t imms = shift;
      if (irl->o == IR_BSHL) {
        mask >>= shift;
        shift = (shmask-shift+1) & shmask;
        imms = 0;
      }
      if (mask && !((mask+1) & mask)) {  /* Contiguous 1-bits at the bottom. */
        Reg dest = ra_dest(as, ir, RSET_GPR);
        Reg left = ra_alloc1(as, irl->op1, RSET_GPR);
        A64Ins ai = shmask == 63 ? A64I_UBFMx : A64I_UBFMw;
        imms += 63 - emit_clz64(mask);
        if (imms > shmask) imms = shmask;
        emit_dn(as, ai | A64F_IMMS(imms) | A64F_IMMR(shift), dest, left);
        return 1;
      }
    }
  }
  return 0;
}
```
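For the BSHR case this fusion is a pure unsigned bitfield extract: `BAND(BSHR(x, r), mask)` with a contiguous low-bit mask of width w becomes UBFM with immr = r and imms = r + w - 1, which is exactly what the `imms += 63 - emit_clz64(mask)` line computes. A minimal sketch that emulates the extract form of UBFM (imms >= immr, the only form this path emits) and checks it against shift-and-mask:

```c
#include <assert.h>
#include <stdint.h>

/* UBFM (64-bit), extract form: imms >= immr copies bits immr..imms
** of src down to bit 0 and zero-extends.
*/
static uint64_t ubfm64_extract(uint64_t x, unsigned immr, unsigned imms)
{
  unsigned width = imms - immr + 1;
  uint64_t lomask = width >= 64 ? ~0ull : (1ull << width) - 1;
  return (x >> immr) & lomask;
}

int main(void)
{
  uint64_t x = 0x123456789abcdef0ull;
  unsigned r = 12, w = 8;            /* BAND(BSHR(x, 12), 0xff). */
  uint64_t mask = (1ull << w) - 1;
  assert(ubfm64_extract(x, r, r + w - 1) == ((x >> r) & mask));
  return 0;
}
```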
```c
/* Fuse BOR(BSHL, BSHR) into EXTR/ROR. */
static int asm_fuseorshift(ASMState *as, IRIns *ir)
{
  IRIns *irl = IR(ir->op1), *irr = IR(ir->op2);
  lj_assertA(ir->o == IR_BOR, "bad usage");
  if (canfuse(as, irl) && canfuse(as, irr) &&
      ((irl->o == IR_BSHR && irr->o == IR_BSHL) ||
       (irl->o == IR_BSHL && irr->o == IR_BSHR))) {
    if (irref_isk(irl->op2) && irref_isk(irr->op2)) {
      IRRef lref = irl->op1, rref = irr->op1;
      uint32_t lshift = IR(irl->op2)->i, rshift = IR(irr->op2)->i;
      if (irl->o == IR_BSHR) {  /* BSHR needs to be the right operand. */
        uint32_t tmp2;
        IRRef tmp1 = lref; lref = rref; rref = tmp1;
        tmp2 = lshift; lshift = rshift; rshift = tmp2;
      }
      if (rshift + lshift == (irt_is64(ir->t) ? 64 : 32)) {
        A64Ins ai = irt_is64(ir->t) ? A64I_EXTRx : A64I_EXTRw;
        Reg dest = ra_dest(as, ir, RSET_GPR);
        Reg left = ra_alloc1(as, lref, RSET_GPR);
        Reg right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, left));
        emit_dnm(as, ai | A64F_IMMS(rshift), dest, left, right);
        return 1;
      }
    }
  }
  return 0;
}
```
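When both shifts read the same source and the amounts sum to the operand width, `(x << l) | (x >> r)` is a rotate right by r, and EXTR with identical source registers is exactly ARM64's ROR encoding. A minimal sketch checking the identity:

```c
#include <assert.h>
#include <stdint.h>

static uint64_t rotr64(uint64_t x, unsigned r)
{
  r &= 63;
  return r ? (x >> r) | (x << (64 - r)) : x;
}

int main(void)
{
  uint64_t x = 0xdeadbeefcafebabeull;
  unsigned r = 13, l = 64 - r;       /* lshift + rshift == 64. */
  assert(((x << l) | (x >> r)) == rotr64(x, r));
  return 0;
}
```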
```c
/* -- Calls --------------------------------------------------------------- */

/* Generate a call to a C function. */
static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t n, nargs = CCI_XNARGS(ci);
  int32_t ofs = 0;
  Reg gpr, fpr = REGARG_FIRSTFPR;
  if (ci->func)
    emit_call(as, ci->func);
  for (gpr = REGARG_FIRSTGPR; gpr <= REGARG_LASTGPR; gpr++)
    as->cost[gpr] = REGCOST(~0u, ASMREF_L);
  gpr = REGARG_FIRSTGPR;
  for (n = 0; n < nargs; n++) {  /* Setup args. */
    IRRef ref = args[n];
    IRIns *ir = IR(ref);
    if (ref) {
      if (irt_isfp(ir->t)) {
        if (fpr <= REGARG_LASTFPR) {
          lj_assertA(rset_test(as->freeset, fpr),
                     "reg %d not free", fpr);  /* Must have been evicted. */
          ra_leftov(as, fpr, ref);
          fpr++;
        } else {
          Reg r = ra_alloc1(as, ref, RSET_FPR);
          emit_spstore(as, ir, r, ofs + ((LJ_BE && !irt_isnum(ir->t)) ? 4 : 0));
          ofs += 8;
        }
      } else {
        if (gpr <= REGARG_LASTGPR) {
          lj_assertA(rset_test(as->freeset, gpr),
                     "reg %d not free", gpr);  /* Must have been evicted. */
          ra_leftov(as, gpr, ref);
          gpr++;
        } else {
          Reg r = ra_alloc1(as, ref, RSET_GPR);
          emit_spstore(as, ir, r, ofs + ((LJ_BE && !irt_is64(ir->t)) ? 4 : 0));
          ofs += 8;
        }
      }
    }
  }
}

/* Setup result reg/sp for call. Evict scratch regs. */
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  RegSet drop = RSET_SCRATCH;
  int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  if (hiop && ra_hasreg((ir+1)->r))
    rset_clear(drop, (ir+1)->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);  /* Evictions must be performed first. */
  if (ra_used(ir)) {
    lj_assertA(!irt_ispri(ir->t), "PRI dest");
    if (irt_isfp(ir->t)) {
      if (ci->flags & CCI_CASTU64) {
        Reg dest = ra_dest(as, ir, RSET_FPR) & 31;
        emit_dn(as, irt_isnum(ir->t) ? A64I_FMOV_D_R : A64I_FMOV_S_R,
                dest, RID_RET);
      } else {
        ra_destreg(as, ir, RID_FPRET);
      }
    } else if (hiop) {
      ra_destpair(as, ir);
    } else {
      ra_destreg(as, ir, RID_RET);
    }
  }
  UNUSED(ci);
}

static void asm_callx(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX*2];
  CCallInfo ci;
  IRRef func;
  IRIns *irf;
  ci.flags = asm_callx_flags(as, ir);
  asm_collectargs(as, ir, &ci, args);
  asm_setupresult(as, ir, &ci);
  func = ir->op2; irf = IR(func);
  if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
  if (irref_isk(func)) {  /* Call to constant address. */
    ci.func = (ASMFunction)(ir_k64(irf)->u64);
  } else {  /* Need a non-argument register for indirect calls. */
    Reg freg = ra_alloc1(as, func, RSET_RANGE(RID_X8, RID_MAX_GPR)-RSET_FIXED);
    emit_n(as, A64I_BLR_AUTH, freg);
    ci.func = (ASMFunction)(void *)0;
  }
  asm_gencall(as, &ci, args);
}

/* -- Returns ------------------------------------------------------------- */

/* Return to lower frame. Guard that it goes to the right spot. */
static void asm_retf(ASMState *as, IRIns *ir)
{
  Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
  void *pc = ir_kptr(IR(ir->op2));
  int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
  as->topslot -= (BCReg)delta;
  if ((int32_t)as->topslot < 0) as->topslot = 0;
  irt_setmark(IR(REF_BASE)->t);  /* Children must not coalesce with BASE reg. */
  /* Need to force a spill on REF_BASE now to update the stack slot. */
  emit_lso(as, A64I_STRx, base, RID_SP, ra_spill(as, IR(REF_BASE)));
  emit_setgl(as, base, jit_base);
  emit_addptr(as, base, -8*delta);
  asm_guardcc(as, CC_NE);
  emit_nm(as, A64I_CMPx, RID_TMP,
          ra_allock(as, i64ptr(pc), rset_exclude(RSET_GPR, base)));
  emit_lso(as, A64I_LDRx, RID_TMP, base, -8);
}

/* -- Buffer operations --------------------------------------------------- */

#if LJ_HASBUFFER
static void asm_bufhdr_write(ASMState *as, Reg sb)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
  IRIns irgc;
  irgc.ot = IRT(0, IRT_PGC);  /* GC type. */
  emit_storeofs(as, &irgc, RID_TMP, sb, offsetof(SBuf, L));
  emit_dn(as, A64I_BFMx | A64F_IMMS(lj_fls(SBUF_MASK_FLAG)) | A64F_IMMR(0), RID_TMP, tmp);
  emit_getgl(as, RID_TMP, cur_L);
  emit_loadofs(as, &irgc, tmp, sb, offsetof(SBuf, L));
}
#endif

/* -- Type conversions ---------------------------------------------------- */

static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_guardcc(as, CC_NE);
  emit_nm(as, A64I_FCMPd, (tmp & 31), (left & 31));
  emit_dn(as, A64I_FCVT_F64_S32, (tmp & 31), dest);
  emit_dn(as, A64I_FCVT_S32_F64, dest, (left & 31));
}
```
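Read bottom-up (the emission order is reversed), asm_tointg converts the double to int32, converts that back to double, compares against the original and guards on mismatch, so the checked CONV exits for any value that does not round-trip. A minimal C sketch of the same predicate; the explicit range check below stands in for fcvtzs, whose out-of-range saturation C cannot express without undefined behavior:

```c
#include <assert.h>
#include <stdint.h>

/* Does d convert to int32 exactly (the condition asm_tointg guards)? */
static int tointg(double d, int32_t *out)
{
  int32_t i;
  if (!(d >= -2147483648.0 && d < 2147483648.0))
    return 0;                    /* On hardware: fcvtzs saturates, fcmp fails. */
  i = (int32_t)d;                /* fcvtzs: truncating convert. */
  if ((double)i != d) return 0;  /* scvtf + fcmp: round trip must match. */
  *out = i;
  return 1;
}

int main(void)
{
  int32_t i;
  assert(tointg(42.0, &i) && i == 42);
  assert(!tointg(42.5, &i));     /* Fractional part: the guard would fire. */
  assert(!tointg(3e10, &i));     /* Out of int32 range: likewise. */
  return 0;
}
```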
```c
static void asm_tobit(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_FPR;
  Reg left = ra_alloc1(as, ir->op1, allow);
  Reg right = ra_alloc1(as, ir->op2, rset_clear(allow, left));
  Reg tmp = ra_scratch(as, rset_clear(allow, right));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  emit_dn(as, A64I_FMOV_R_S, dest, (tmp & 31));
  emit_dnm(as, A64I_FADDd, (tmp & 31), (left & 31), (right & 31));
}
```
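TOBIT uses the classic bias trick: op2 is a magic double constant (2^52+2^51 in LuaJIT's IR), and adding it to a value in the ±2^51 range leaves the rounded 32-bit integer result in the low word of the sum's mantissa, which the single-precision FMOV_R_S then copies to a GPR. A minimal sketch, assuming that bias constant:

```c
#include <assert.h>
#include <stdint.h>
#include <string.h>

/* tobit via the 2^52+2^51 bias; valid for |d| < 2^51. */
static int32_t tobit(double d)
{
  double biased = d + 6755399441055744.0;  /* 2^52 + 2^51. */
  uint64_t u;
  memcpy(&u, &biased, 8);        /* The FMOV_R_S: take the low 32 bits. */
  return (int32_t)(uint32_t)u;
}

int main(void)
{
  assert(tobit(1.0) == 1);
  assert(tobit(-1.0) == -1);              /* Wraps to 0xffffffff. */
  assert(tobit(4294967296.0 + 5) == 5);   /* Reduced modulo 2^32. */
  return 0;
}
```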
```c
static void asm_conv(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
  int st64 = (st == IRT_I64 || st == IRT_U64 || st == IRT_P64);
  int stfp = (st == IRT_NUM || st == IRT_FLOAT);
  IRRef lref = ir->op1;
  lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
  if (irt_isfp(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    if (stfp) {  /* FP to FP conversion. */
      emit_dn(as, st == IRT_NUM ? A64I_FCVT_F32_F64 : A64I_FCVT_F64_F32,
              (dest & 31), (ra_alloc1(as, lref, RSET_FPR) & 31));
    } else {  /* Integer to FP conversion. */
      Reg left = ra_alloc1(as, lref, RSET_GPR);
      A64Ins ai = irt_isfloat(ir->t) ?
        (((IRT_IS64 >> st) & 1) ?
         (st == IRT_I64 ? A64I_FCVT_F32_S64 : A64I_FCVT_F32_U64) :
         (st == IRT_INT ? A64I_FCVT_F32_S32 : A64I_FCVT_F32_U32)) :
        (((IRT_IS64 >> st) & 1) ?
         (st == IRT_I64 ? A64I_FCVT_F64_S64 : A64I_FCVT_F64_U64) :
         (st == IRT_INT ? A64I_FCVT_F64_S32 : A64I_FCVT_F64_U32));
      emit_dn(as, ai, (dest & 31), left);
    }
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
                 "bad type for checked CONV");
      asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
    } else {
      Reg left = ra_alloc1(as, lref, RSET_FPR);
      Reg dest = ra_dest(as, ir, RSET_GPR);
      A64Ins ai = irt_is64(ir->t) ?
        (st == IRT_NUM ?
         (irt_isi64(ir->t) ? A64I_FCVT_S64_F64 : A64I_FCVT_U64_F64) :
         (irt_isi64(ir->t) ? A64I_FCVT_S64_F32 : A64I_FCVT_U64_F32)) :
        (st == IRT_NUM ?
         (irt_isint(ir->t) ? A64I_FCVT_S32_F64 : A64I_FCVT_U32_F64) :
         (irt_isint(ir->t) ? A64I_FCVT_S32_F32 : A64I_FCVT_U32_F32));
      emit_dn(as, ai, dest, (left & 31));
    }
  } else if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_alloc1(as, lref, RSET_GPR);
    A64Ins ai = st == IRT_I8 ? A64I_SXTBw :
                st == IRT_U8 ? A64I_UXTBw :
                st == IRT_I16 ? A64I_SXTHw : A64I_UXTHw;
    lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
    emit_dn(as, ai, dest, left);
  } else {
    Reg dest = ra_dest(as, ir, RSET_GPR);
    if (irt_is64(ir->t)) {
      if (st64 || !(ir->op2 & IRCONV_SEXT)) {
        /* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */
        ra_leftov(as, dest, lref);  /* Do nothing, but may need to move regs. */
      } else {  /* 32 to 64 bit sign extension. */
        Reg left = ra_alloc1(as, lref, RSET_GPR);
        emit_dn(as, A64I_SXTW, dest, left);
      }
    } else {
      if (st64 && !(ir->op2 & IRCONV_NONE)) {
        /* This is either a 32 bit reg/reg mov which zeroes the hiword
        ** or a load of the loword from a 64 bit address.
        */
        Reg left = ra_alloc1(as, lref, RSET_GPR);
        emit_dm(as, A64I_MOVw, dest, left);
      } else {  /* 32/32 bit no-op (cast). */
        ra_leftov(as, dest, lref);  /* Do nothing, but may need to move regs. */
      }
    }
  }
}

static void asm_strto(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
  IRRef args[2];
  Reg dest = 0, tmp;
  int destused = ra_used(ir);
  int32_t ofs = 0;
  ra_evictset(as, RSET_SCRATCH);
  if (destused) {
    if (ra_hasspill(ir->s)) {
      ofs = sps_scale(ir->s);
      destused = 0;
      if (ra_hasreg(ir->r)) {
        ra_free(as, ir->r);
        ra_modified(as, ir->r);
        emit_spload(as, ir, ir->r, ofs);
      }
    } else {
      dest = ra_dest(as, ir, RSET_FPR);
    }
  }
  if (destused)
    emit_lso(as, A64I_LDRd, (dest & 31), RID_SP, 0);
  asm_guardcnb(as, A64I_CBZ, RID_RET);
  args[0] = ir->op1;      /* GCstr *str */
  args[1] = ASMREF_TMP1;  /* TValue *n */
  asm_gencall(as, ci, args);
  tmp = ra_releasetmp(as, ASMREF_TMP1);
  emit_opk(as, A64I_ADDx, tmp, RID_SP, ofs, RSET_GPR);
}

/* -- Memory references --------------------------------------------------- */

/* Store tagged value for ref at base+ofs. */
static void asm_tvstore64(ASMState *as, Reg base, int32_t ofs, IRRef ref)
{
  RegSet allow = rset_exclude(RSET_GPR, base);
  IRIns *ir = IR(ref);
  lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) || irt_isinteger(ir->t),
             "store of IR type %d", irt_type(ir->t));
  if (irref_isk(ref)) {
    TValue k;
    lj_ir_kvalue(as->J->L, &k, ir);
    emit_lso(as, A64I_STRx, ra_allock(as, k.u64, allow), base, ofs);
  } else {
    Reg src = ra_alloc1(as, ref, allow);
    rset_clear(allow, src);
    if (irt_isinteger(ir->t)) {
      Reg type = ra_allock(as, (int64_t)irt_toitype(ir->t) << 47, allow);
      emit_lso(as, A64I_STRx, RID_TMP, base, ofs);
      emit_dnm(as, A64I_ADDx | A64F_EX(A64EX_UXTW), RID_TMP, type, src);
    } else {
      Reg type = ra_allock(as, (int32_t)irt_toitype(ir->t), allow);
      emit_lso(as, A64I_STRx, RID_TMP, base, ofs);
      emit_dnm(as, A64I_ADDx | A64F_SH(A64SH_LSL, 47), RID_TMP, src, type);
    }
  }
}
```
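This is the 64-bit NaN-tagging layout at work: a tagged TValue puts the item type in the bits from 47 up and the payload below, built either as type plus zero-extended integer (the UXTW form) or as pointer plus type shifted left by 47 (the LSL #47 form, where the shift applies to the type operand). A minimal sketch of the layout, using a made-up type code rather than the real constants from lj_obj.h:

```c
#include <assert.h>
#include <stdint.h>

/* Tagged 64-bit TValue: type tag in bits 47..63, payload below. */
static uint64_t tag_int(int64_t itype, uint32_t i)
{
  return ((uint64_t)itype << 47) + i;      /* add xT, type, src, uxtw */
}
static uint64_t tag_ptr(int64_t itype, uint64_t ptr47)
{
  return ptr47 + ((uint64_t)itype << 47);  /* add xT, src, type, lsl #47 */
}

int main(void)
{
  int64_t itype = -14;  /* Hypothetical type code, not a real LJ_T* value. */
  uint64_t tv = tag_int(itype, 0xdeadbeefu);
  assert((uint32_t)tv == 0xdeadbeefu);     /* Payload intact. */
  assert(((int64_t)tv >> 47) == itype);    /* Tag recoverable. */
  assert(tag_ptr(itype, 0x7f00112233ull) ==
         tag_int(itype, 0) + 0x7f00112233ull);  /* Same layout either way. */
  return 0;
}
```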
```c
/* Get pointer to TValue. */
static void asm_tvptr(ASMState *as, Reg dest, IRRef ref, MSize mode)
{
  if ((mode & IRTMPREF_IN1)) {
    IRIns *ir = IR(ref);
    if (irt_isnum(ir->t)) {
      if (irref_isk(ref) && !(mode & IRTMPREF_OUT1)) {
        /* Use the number constant itself as a TValue. */
        ra_allockreg(as, i64ptr(ir_knum(ir)), dest);
        return;
      }
      emit_lso(as, A64I_STRd, (ra_alloc1(as, ref, RSET_FPR) & 31), dest, 0);
    } else {
      asm_tvstore64(as, dest, 0, ref);
    }
  }
  /* g->tmptv holds the TValue(s). */
  emit_dn(as, A64I_ADDx^emit_isk12(glofs(as, &J2G(as->J)->tmptv)), dest, RID_GL);
}

static void asm_aref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx, base;
  if (irref_isk(ir->op2)) {
    IRRef tab = IR(ir->op1)->op1;
    int32_t ofs = asm_fuseabase(as, tab);
    IRRef refa = ofs ? tab : ir->op1;
    uint32_t k = emit_isk12(ofs + 8*IR(ir->op2)->i);
    if (k) {
      base = ra_alloc1(as, refa, RSET_GPR);
      emit_dn(as, A64I_ADDx^k, dest, base);
      return;
    }
  }
  base = ra_alloc1(as, ir->op1, RSET_GPR);
  idx = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, base));
  emit_dnm(as, A64I_ADDx | A64F_EXSH(A64EX_UXTW, 3), dest, base, idx);
}

/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**   do {
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
**   return niltv(L);
*/
static void asm_href(ASMState *as, IRIns *ir, IROp merge)
{
  RegSet allow = RSET_GPR;
  int destused = ra_used(ir);
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = 0, tmp = RID_TMP;
  Reg ftmp = RID_NONE, type = RID_NONE, scr = RID_NONE, tisnum = RID_NONE;
  IRRef refkey = ir->op2;
  IRIns *irkey = IR(refkey);
  int isk = irref_isk(ir->op2);
  IRType1 kt = irkey->t;
  uint32_t k = 0;
  uint32_t khash;
  MCLabel l_end, l_loop, l_next;
  rset_clear(allow, tab);

  if (!isk) {
    key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow);
    rset_clear(allow, key);
    if (!irt_isstr(kt)) {
      tmp = ra_scratch(as, allow);
      rset_clear(allow, tmp);
    }
  } else if (irt_isnum(kt)) {
    int64_t val = (int64_t)ir_knum(irkey)->u64;
    if (!(k = emit_isk12(val))) {
      key = ra_allock(as, val, allow);
      rset_clear(allow, key);
    }
  } else if (!irt_ispri(kt)) {
    if (!(k = emit_isk12(irkey->i))) {
      key = ra_alloc1(as, refkey, allow);
      rset_clear(allow, key);
    }
  }

  /* Allocate constants early. */
  if (irt_isnum(kt)) {
    if (!isk) {
      tisnum = ra_allock(as, LJ_TISNUM << 15, allow);
      ftmp = ra_scratch(as, rset_exclude(RSET_FPR, key));
      rset_clear(allow, tisnum);
    }
  } else if (irt_isaddr(kt)) {
    if (isk) {
      int64_t kk = ((int64_t)irt_toitype(kt) << 47) | irkey[1].tv.u64;
      scr = ra_allock(as, kk, allow);
    } else {
      scr = ra_scratch(as, allow);
    }
    rset_clear(allow, scr);
  } else {
    lj_assertA(irt_ispri(kt) && !irt_isnil(kt), "bad HREF key type");
    type = ra_allock(as, ~((int64_t)~irt_toitype(kt) << 47), allow);
    scr = ra_scratch(as, rset_clear(allow, type));
    rset_clear(allow, scr);
  }

  /* Key not found in chain: jump to exit (if merged) or load niltv. */
  l_end = emit_label(as);
  as->invmcp = NULL;
  if (merge == IR_NE)
    asm_guardcc(as, CC_AL);
  else if (destused)
    emit_loada(as, dest, niltvg(J2G(as->J)));

  /* Follow hash chain until the end. */
  l_loop = --as->mcp;
  emit_n(as, A64I_CMPx^A64I_K12^0, dest);
  emit_lso(as, A64I_LDRx, dest, dest, offsetof(Node, next));
  l_next = emit_label(as);

  /* Type and value comparison. */
  if (merge == IR_EQ)
    asm_guardcc(as, CC_EQ);
  else
    emit_cond_branch(as, CC_EQ, l_end);

  if (irt_isnum(kt)) {
    if (isk) {
      /* Assumes -0.0 is already canonicalized to +0.0. */
      if (k)
        emit_n(as, A64I_CMPx^k, tmp);
      else
        emit_nm(as, A64I_CMPx, key, tmp);
      emit_lso(as, A64I_LDRx, tmp, dest, offsetof(Node, key.u64));
    } else {
      emit_nm(as, A64I_FCMPd, key, ftmp);
      emit_dn(as, A64I_FMOV_D_R, (ftmp & 31), (tmp & 31));
      emit_cond_branch(as, CC_LO, l_next);
      emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32), tisnum, tmp);
      emit_lso(as, A64I_LDRx, tmp, dest, offsetof(Node, key.n));
    }
  } else if (irt_isaddr(kt)) {
    if (isk) {
      emit_nm(as, A64I_CMPx, scr, tmp);
      emit_lso(as, A64I_LDRx, tmp, dest, offsetof(Node, key.u64));
    } else {
      emit_nm(as, A64I_CMPx, tmp, scr);
      emit_lso(as, A64I_LDRx, scr, dest, offsetof(Node, key.u64));
    }
  } else {
    emit_nm(as, A64I_CMPx, scr, type);
    emit_lso(as, A64I_LDRx, scr, dest, offsetof(Node, key));
  }

  *l_loop = A64I_BCC | A64F_S19(as->mcp - l_loop) | CC_NE;
  if (!isk && irt_isaddr(kt)) {
    type = ra_allock(as, (int32_t)irt_toitype(kt), allow);
    emit_dnm(as, A64I_ADDx | A64F_SH(A64SH_LSL, 47), tmp, key, type);
    rset_clear(allow, type);
  }
  /* Load main position relative to tab->node into dest. */
  khash = isk ? ir_khash(as, irkey) : 1;
  if (khash == 0) {
    emit_lso(as, A64I_LDRx, dest, tab, offsetof(GCtab, node));
  } else {
    emit_dnm(as, A64I_ADDx | A64F_SH(A64SH_LSL, 3), dest, tmp, dest);
    emit_dnm(as, A64I_ADDx | A64F_SH(A64SH_LSL, 1), dest, dest, dest);
    emit_lso(as, A64I_LDRx, tmp, tab, offsetof(GCtab, node));
    if (isk) {
      Reg tmphash = ra_allock(as, khash, allow);
      emit_dnm(as, A64I_ANDw, dest, dest, tmphash);
      emit_lso(as, A64I_LDRw, dest, tab, offsetof(GCtab, hmask));
    } else if (irt_isstr(kt)) {
      /* Fetch of str->sid is cheaper than ra_allock. */
      emit_dnm(as, A64I_ANDw, dest, dest, tmp);
      emit_lso(as, A64I_LDRw, tmp, key, offsetof(GCstr, sid));
      emit_lso(as, A64I_LDRw, dest, tab, offsetof(GCtab, hmask));
    } else {  /* Must match with hash*() in lj_tab.c. */
      emit_dnm(as, A64I_ANDw, dest, dest, tmp);
      emit_lso(as, A64I_LDRw, tmp, tab, offsetof(GCtab, hmask));
      emit_dnm(as, A64I_SUBw, dest, dest, tmp);
      emit_dnm(as, A64I_EXTRw | (A64F_IMMS(32-HASH_ROT3)), tmp, tmp, tmp);
      emit_dnm(as, A64I_EORw, dest, dest, tmp);
      emit_dnm(as, A64I_EXTRw | (A64F_IMMS(32-HASH_ROT2)), dest, dest, dest);
      emit_dnm(as, A64I_SUBw, tmp, tmp, dest);
      emit_dnm(as, A64I_EXTRw | (A64F_IMMS(32-HASH_ROT1)), dest, dest, dest);
      emit_dnm(as, A64I_EORw, tmp, tmp, dest);
      if (irt_isnum(kt)) {
        emit_dnm(as, A64I_ADDw, dest, dest, dest);
        emit_dn(as, A64I_LSRx | A64F_IMMR(32)|A64F_IMMS(32), dest, dest);
        emit_dm(as, A64I_MOVw, tmp, dest);
        emit_dn(as, A64I_FMOV_R_D, dest, (key & 31));
      } else {
        checkmclim(as);
        emit_dm(as, A64I_MOVw, tmp, key);
        emit_dnm(as, A64I_EORw, dest, dest,
                 ra_allock(as, irt_toitype(kt) << 15, allow));
        emit_dn(as, A64I_LSRx | A64F_IMMR(32)|A64F_IMMS(32), dest, dest);
        emit_dm(as, A64I_MOVx, dest, key);
      }
    }
  }
}

static void asm_hrefk(ASMState *as, IRIns *ir)
{
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  int32_t kofs = ofs + (int32_t)offsetof(Node, key);
  int bigofs = !emit_checkofs(A64I_LDRx, kofs);
  Reg dest = (ra_used(ir) || bigofs) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg key, idx = node;
  RegSet allow = rset_exclude(RSET_GPR, node);
  uint64_t k;
  lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
  if (bigofs) {
    idx = dest;
    rset_clear(allow, dest);
    kofs = (int32_t)offsetof(Node, key);
  } else if (ra_hasreg(dest)) {
    emit_opk(as, A64I_ADDx, dest, node, ofs, allow);
  }
  asm_guardcc(as, CC_NE);
  if (irt_ispri(irkey->t)) {
    k = ~((int64_t)~irt_toitype(irkey->t) << 47);
  } else if (irt_isnum(irkey->t)) {
    k = ir_knum(irkey)->u64;
  } else {
    k = ((uint64_t)irt_toitype(irkey->t) << 47) | (uint64_t)ir_kgc(irkey);
  }
  key = ra_scratch(as, allow);
  emit_nm(as, A64I_CMPx, key, ra_allock(as, k, rset_exclude(allow, key)));
  emit_lso(as, A64I_LDRx, key, idx, kofs);
  if (bigofs)
    emit_opk(as, A64I_ADDx, dest, node, ofs, rset_exclude(RSET_GPR, node));
}

static void asm_uref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op1)) {
    GCfunc *fn = ir_kfunc(IR(ir->op1));
    MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
    emit_lsptr(as, A64I_LDRx, dest, v);
  } else {
    Reg uv = ra_scratch(as, RSET_GPR);
    Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->o == IR_UREFC) {
      asm_guardcc(as, CC_NE);
      emit_n(as, (A64I_CMPx^A64I_K12) | A64F_U12(1), RID_TMP);
      emit_opk(as, A64I_ADDx, dest, uv,
               (int32_t)offsetof(GCupval, tv), RSET_GPR);
      emit_lso(as, A64I_LDRB, RID_TMP, uv, (int32_t)offsetof(GCupval, closed));
    } else {
      emit_lso(as, A64I_LDRx, dest, uv, (int32_t)offsetof(GCupval, v));
    }
    emit_lso(as, A64I_LDRx, uv, func,
             (int32_t)offsetof(GCfuncL, uvptr) + 8*(int32_t)(ir->op2 >> 8));
  }
}

static void asm_fref(ASMState *as, IRIns *ir)
{
  UNUSED(as); UNUSED(ir);
  lj_assertA(!ra_used(ir), "unfused FREF");
}

static void asm_strref(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_GPR;
  Reg dest = ra_dest(as, ir, allow);
  Reg base = ra_alloc1(as, ir->op1, allow);
  IRIns *irr = IR(ir->op2);
  int32_t ofs = sizeof(GCstr);
  uint32_t m;
  rset_clear(allow, base);
  if (irref_isk(ir->op2) && (m = emit_isk12(ofs + irr->i))) {
    emit_dn(as, A64I_ADDx^m, dest, base);
  } else {
    emit_dn(as, (A64I_ADDx^A64I_K12) | A64F_U12(ofs), dest, dest);
    emit_dnm(as, A64I_ADDx, dest, base, ra_alloc1(as, ir->op2, allow));
  }
}

/* -- Loads and stores ---------------------------------------------------- */

static A64Ins asm_fxloadins(IRIns *ir)
{
  switch (irt_type(ir->t)) {
  case IRT_I8: return A64I_LDRB ^ A64I_LS_S;
  case IRT_U8: return A64I_LDRB;
  case IRT_I16: return A64I_LDRH ^ A64I_LS_S;
  case IRT_U16: return A64I_LDRH;
  case IRT_NUM: return A64I_LDRd;
  case IRT_FLOAT: return A64I_LDRs;
  default: return irt_is64(ir->t) ? A64I_LDRx : A64I_LDRw;
  }
}

static A64Ins asm_fxstoreins(IRIns *ir)
{
  switch (irt_type(ir->t)) {
  case IRT_I8: case IRT_U8: return A64I_STRB;
  case IRT_I16: case IRT_U16: return A64I_STRH;
  case IRT_NUM: return A64I_STRd;
  case IRT_FLOAT: return A64I_STRs;
  default: return irt_is64(ir->t) ? A64I_STRx : A64I_STRw;
  }
}

static void asm_fload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg idx;
  A64Ins ai = asm_fxloadins(ir);
  int32_t ofs;
  if (ir->op1 == REF_NIL) {  /* FLOAD from GG_State with offset. */
    idx = RID_GL;
    ofs = (ir->op2 << 2) - GG_OFS(g);
  } else {
    idx = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->op2 == IRFL_TAB_ARRAY) {
      ofs = asm_fuseabase(as, ir->op1);
      if (ofs) {  /* Turn the t->array load into an add for colocated arrays. */
        emit_dn(as, (A64I_ADDx^A64I_K12) | A64F_U12(ofs), dest, idx);
        return;
      }
    }
    ofs = field_ofs[ir->op2];
  }
  emit_lso(as, ai, (dest & 31), idx, ofs);
}

static void asm_fstore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1(as, ir->op2, RSET_GPR);
    IRIns *irf = IR(ir->op1);
    Reg idx = ra_alloc1(as, irf->op1, rset_exclude(RSET_GPR, src));
    int32_t ofs = field_ofs[irf->op2];
    emit_lso(as, asm_fxstoreins(ir), (src & 31), idx, ofs);
  }
}

static void asm_xload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
  lj_assertA(!(ir->op2 & IRXLOAD_UNALIGNED), "unaligned XLOAD");
  asm_fusexref(as, asm_fxloadins(ir), dest, ir->op1, RSET_GPR);
}

static void asm_xstore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    Reg src = ra_alloc1(as, ir->op2, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
    asm_fusexref(as, asm_fxstoreins(ir), src, ir->op1,
                 rset_exclude(RSET_GPR, src));
  }
}

static void asm_ahuvload(ASMState *as, IRIns *ir)
{
  Reg idx, tmp, type;
  int32_t ofs = 0;
  RegSet gpr = RSET_GPR, allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR;
  lj_assertA(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) ||
             irt_isint(ir->t),
             "bad load type %d", irt_type(ir->t));
  if (ra_used(ir)) {
    Reg dest = ra_dest(as, ir, allow);
    tmp = irt_isnum(ir->t) ? ra_scratch(as, rset_clear(gpr, dest)) : dest;
    if (irt_isaddr(ir->t)) {
      emit_dn(as, A64I_ANDx^emit_isk13(LJ_GCVMASK, 1), dest, dest);
    } else if (irt_isnum(ir->t)) {
      emit_dn(as, A64I_FMOV_D_R, (dest & 31), tmp);
    } else if (irt_isint(ir->t)) {
      emit_dm(as, A64I_MOVw, dest, dest);
    }
  } else {
    tmp = ra_scratch(as, gpr);
  }
  type = ra_scratch(as, rset_clear(gpr, tmp));
  idx = asm_fuseahuref(as, ir->op1, &ofs, rset_clear(gpr, type), A64I_LDRx);
  if (ir->o == IR_VLOAD) ofs += 8 * ir->op2;
  /* Always do the type check, even if the load result is unused. */
  asm_guardcc(as, irt_isnum(ir->t) ? CC_LS : CC_NE);
  if (irt_type(ir->t) >= IRT_NUM) {
    lj_assertA(irt_isinteger(ir->t) || irt_isnum(ir->t),
               "bad load type %d", irt_type(ir->t));
    emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32),
            ra_allock(as, LJ_TISNUM << 15, rset_exclude(gpr, idx)), tmp);
  } else if (irt_isaddr(ir->t)) {
    emit_n(as, (A64I_CMNx^A64I_K12) | A64F_U12(-irt_toitype(ir->t)), type);
    emit_dn(as, A64I_ASRx | A64F_IMMR(47), type, tmp);
  } else if (irt_isnil(ir->t)) {
    emit_n(as, (A64I_CMNx^A64I_K12) | A64F_U12(1), tmp);
  } else {
    emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32),
            ra_allock(as, (irt_toitype(ir->t) << 15) | 0x7fff, gpr), tmp);
  }
  if (ofs & FUSE_REG)
    emit_dnm(as, (A64I_LDRx^A64I_LS_R)|A64I_LS_UXTWx|A64I_LS_SH, tmp, idx, (ofs & 31));
  else
    emit_lso(as, A64I_LDRx, tmp, idx, ofs);
}

static void asm_ahustore(ASMState *as, IRIns *ir)
{
  if (ir->r != RID_SINK) {
    RegSet allow = RSET_GPR;
    Reg idx, src = RID_NONE, tmp = RID_TMP, type = RID_NONE;
    int32_t ofs = 0;
    if (irt_isnum(ir->t)) {
      src = ra_alloc1(as, ir->op2, RSET_FPR);
      idx = asm_fuseahuref(as, ir->op1, &ofs, allow, A64I_STRd);
      if (ofs & FUSE_REG)
        emit_dnm(as, (A64I_STRd^A64I_LS_R)|A64I_LS_UXTWx|A64I_LS_SH, (src & 31), idx, (ofs & 31));
      else
        emit_lso(as, A64I_STRd, (src & 31), idx, ofs);
    } else {
      if (!irt_ispri(ir->t)) {
        src = ra_alloc1(as, ir->op2, allow);
        rset_clear(allow, src);
        if (irt_isinteger(ir->t))
          type = ra_allock(as, (uint64_t)(int32_t)LJ_TISNUM << 47, allow);
        else
          type = ra_allock(as, irt_toitype(ir->t), allow);
      } else {
        tmp = type = ra_allock(as, ~((int64_t)~irt_toitype(ir->t)<<47), allow);
      }
      idx = asm_fuseahuref(as, ir->op1, &ofs, rset_exclude(allow, type),
                           A64I_STRx);
      if (ofs & FUSE_REG)
        emit_dnm(as, (A64I_STRx^A64I_LS_R)|A64I_LS_UXTWx|A64I_LS_SH, tmp, idx, (ofs & 31));
      else
        emit_lso(as, A64I_STRx, tmp, idx, ofs);
      if (ra_hasreg(src)) {
        if (irt_isinteger(ir->t)) {
          emit_dnm(as, A64I_ADDx | A64F_EX(A64EX_UXTW), tmp, type, src);
        } else {
          emit_dnm(as, A64I_ADDx | A64F_SH(A64SH_LSL, 47), tmp, src, type);
        }
      }
    }
  }
}

static void asm_sload(ASMState *as, IRIns *ir)
{
  int32_t ofs = 8*((int32_t)ir->op1-2);
  IRType1 t = ir->t;
  Reg dest = RID_NONE, base;
  RegSet allow = RSET_GPR;
  lj_assertA(!(ir->op2 & IRSLOAD_PARENT),
             "bad parent SLOAD");  /* Handled by asm_head_side(). */
  lj_assertA(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK),
             "inconsistent SLOAD variant");
  if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) {
    dest = ra_scratch(as, RSET_FPR);
    asm_tointg(as, ir, dest);
    t.irt = IRT_NUM;  /* Continue with a regular number type check. */
  } else if (ra_used(ir)) {
    Reg tmp = RID_NONE;
    if ((ir->op2 & IRSLOAD_CONVERT))
      tmp = ra_scratch(as, irt_isint(t) ? RSET_FPR : RSET_GPR);
    lj_assertA((irt_isnum(t)) || irt_isint(t) || irt_isaddr(t),
               "bad SLOAD type %d", irt_type(t));
    dest = ra_dest(as, ir, irt_isnum(t) ? RSET_FPR : allow);
    base = ra_alloc1(as, REF_BASE, rset_clear(allow, dest));
    if (irt_isaddr(t)) {
      emit_dn(as, A64I_ANDx^emit_isk13(LJ_GCVMASK, 1), dest, dest);
    } else if ((ir->op2 & IRSLOAD_CONVERT)) {
      if (irt_isint(t)) {
        emit_dn(as, A64I_FCVT_S32_F64, dest, (tmp & 31));
        /* If value is already loaded for type check, move it to FPR. */
        if ((ir->op2 & IRSLOAD_TYPECHECK))
          emit_dn(as, A64I_FMOV_D_R, (tmp & 31), dest);
        else
          dest = tmp;
        t.irt = IRT_NUM;  /* Check for original type. */
      } else {
        emit_dn(as, A64I_FCVT_F64_S32, (dest & 31), tmp);
        dest = tmp;
        t.irt = IRT_INT;  /* Check for original type. */
      }
    } else if (irt_isint(t) && (ir->op2 & IRSLOAD_TYPECHECK)) {
      emit_dm(as, A64I_MOVw, dest, dest);
    }
    goto dotypecheck;
  }
  base = ra_alloc1(as, REF_BASE, allow);
dotypecheck:
  rset_clear(allow, base);
  if ((ir->op2 & IRSLOAD_TYPECHECK)) {
    Reg tmp;
    if (ra_hasreg(dest) && rset_test(RSET_GPR, dest)) {
      tmp = dest;
    } else {
      tmp = ra_scratch(as, allow);
      rset_clear(allow, tmp);
    }
    if (ra_hasreg(dest) && tmp != dest)
      emit_dn(as, A64I_FMOV_D_R, (dest & 31), tmp);
    /* Need type check, even if the load result is unused. */
    asm_guardcc(as, irt_isnum(t) ? CC_LS : CC_NE);
    if (irt_type(t) >= IRT_NUM) {
      lj_assertA(irt_isinteger(t) || irt_isnum(t),
                 "bad SLOAD type %d", irt_type(t));
      emit_nm(as, A64I_CMPx | A64F_SH(A64SH_LSR, 32),
              ra_allock(as, (ir->op2 & IRSLOAD_KEYINDEX) ? LJ_KEYINDEX : (LJ_TISNUM << 15), allow), tmp);
    } else if (irt_isnil(t)) {
      emit_n(as, (A64I_CMNx^A64I_K12) | A64F_U12(1), tmp);
    } else if (irt_ispri(t)) {
      emit_nm(as, A64I_CMPx,
              ra_allock(as, ~((int64_t)~irt_toitype(t) << 47), allow), tmp);
    } else {
      Reg type = ra_scratch(as, allow);
      emit_n(as, (A64I_CMNx^A64I_K12) | A64F_U12(-irt_toitype(t)), type);
      emit_dn(as, A64I_ASRx | A64F_IMMR(47), type, tmp);
    }
    emit_lso(as, A64I_LDRx, tmp, base, ofs);
    return;
  }
  if (ra_hasreg(dest)) {
    emit_lso(as, irt_isnum(t) ? A64I_LDRd :
             (irt_isint(t) ? A64I_LDRw : A64I_LDRx), (dest & 31), base,
             ofs ^ ((LJ_BE && irt_isint(t) ? 4 : 0)));
  }
}

/* -- Allocations --------------------------------------------------------- */

#if LJ_HASFFI
static void asm_cnew(ASMState *as, IRIns *ir)
{
  CTState *cts = ctype_ctsG(J2G(as->J));
  CTypeID id = (CTypeID)IR(ir->op1)->i;
  CTSize sz;
  CTInfo info = lj_ctype_info(cts, id, &sz);
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco];
  IRRef args[4];
  RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
  lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL),
             "bad CNEW/CNEWI operands");

  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCcdata * */
  /* Initialize immutable cdata object. */
  if (ir->o == IR_CNEWI) {
    int32_t ofs = sizeof(GCcdata);
    Reg r = ra_alloc1(as, ir->op2, allow);
    lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz);
    emit_lso(as, sz == 8 ? A64I_STRx : A64I_STRw, r, RID_RET, ofs);
  } else if (ir->op2 != REF_NIL) {  /* Create VLA/VLS/aligned cdata. */
    ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv];
    args[0] = ASMREF_L;     /* lua_State *L */
    args[1] = ir->op1;      /* CTypeID id   */
    args[2] = ir->op2;      /* CTSize sz    */
    args[3] = ASMREF_TMP1;  /* CTSize align */
    asm_gencall(as, ci, args);
    emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info));
    return;
  }

  /* Initialize gct and ctypeid. lj_mem_newgco() already sets marked. */
  {
    Reg r = (id < 65536) ? RID_X1 : ra_allock(as, id, allow);
    emit_lso(as, A64I_STRB, RID_TMP, RID_RET, offsetof(GCcdata, gct));
    emit_lso(as, A64I_STRH, r, RID_RET, offsetof(GCcdata, ctypeid));
    emit_d(as, A64I_MOVZw | A64F_U16(~LJ_TCDATA), RID_TMP);
    if (id < 65536) emit_d(as, A64I_MOVZw | A64F_U16(id), RID_X1);
  }
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ASMREF_TMP1;  /* MSize size   */
  asm_gencall(as, ci, args);
  ra_allockreg(as, (int32_t)(sz+sizeof(GCcdata)),
               ra_releasetmp(as, ASMREF_TMP1));
}
#endif

/* -- Write barriers ------------------------------------------------------ */

static void asm_tbar(ASMState *as, IRIns *ir)
{
  Reg tab = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg link = ra_scratch(as, rset_exclude(RSET_GPR, tab));
  Reg mark = RID_TMP;
  MCLabel l_end = emit_label(as);
  emit_lso(as, A64I_STRx, link, tab, (int32_t)offsetof(GCtab, gclist));
  emit_lso(as, A64I_STRB, mark, tab, (int32_t)offsetof(GCtab, marked));
  emit_setgl(as, tab, gc.grayagain);
  emit_dn(as, A64I_ANDw^emit_isk13(~LJ_GC_BLACK, 0), mark, mark);
  emit_getgl(as, link, gc.grayagain);
  emit_cond_branch(as, CC_EQ, l_end);
  emit_n(as, A64I_TSTw^emit_isk13(LJ_GC_BLACK, 0), mark);
  emit_lso(as, A64I_LDRB, mark, tab, (int32_t)offsetof(GCtab, marked));
}
```
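In execution order the barrier loads the table's marked byte, tests the black bit, and bails out if it is clear; otherwise it clears the bit and links the table onto g->gc.grayagain. This is the backward form of the write barrier: instead of marking the stored value, the black table is re-grayed so the GC revisits it. A minimal C sketch of that logic with stand-in structures and an assumed flag value, not the real definitions from lj_obj.h:

```c
#include <stdint.h>
#include <stddef.h>

#define GC_BLACK 0x04  /* Assumed flag bit for the sketch. */

typedef struct Tab {
  uint8_t marked;
  struct Tab *gclist;   /* Stand-in for the GCRef-based list link. */
} Tab;

static Tab *grayagain;  /* Stand-in for g->gc.grayagain. */

static void tbar(Tab *t)
{
  if (t->marked & GC_BLACK) {         /* Only black tables need the barrier. */
    t->marked &= (uint8_t)~GC_BLACK;  /* Back to gray... */
    t->gclist = grayagain;            /* ...and onto the grayagain list. */
    grayagain = t;
  }
}

int main(void)
{
  Tab t = { GC_BLACK, NULL };
  tbar(&t);
  return !(grayagain == &t && !(t.marked & GC_BLACK));
}
```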
| 1301 | |
| 1302 static void asm_obar(ASMState *as, IRIns *ir) | |
| 1303 { | |
| 1304 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv]; | |
| 1305 IRRef args[2]; | |
| 1306 MCLabel l_end; | |
| 1307 RegSet allow = RSET_GPR; | |
| 1308 Reg obj, val, tmp; | |
| 1309 /* No need for other object barriers (yet). */ | |
| 1310 lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type"); | |
| 1311 ra_evictset(as, RSET_SCRATCH); | |
| 1312 l_end = emit_label(as); | |
| 1313 args[0] = ASMREF_TMP1; /* global_State *g */ | |
| 1314 args[1] = ir->op1; /* TValue *tv */ | |
| 1315 asm_gencall(as, ci, args); | |
| 1316 emit_dm(as, A64I_MOVx, ra_releasetmp(as, ASMREF_TMP1), RID_GL); | |
| 1317 obj = IR(ir->op1)->r; | |
| 1318 tmp = ra_scratch(as, rset_exclude(allow, obj)); | |
| 1319 emit_cond_branch(as, CC_EQ, l_end); | |
| 1320 emit_n(as, A64I_TSTw^emit_isk13(LJ_GC_BLACK, 0), tmp); | |
| 1321 emit_cond_branch(as, CC_EQ, l_end); | |
| 1322 emit_n(as, A64I_TSTw^emit_isk13(LJ_GC_WHITES, 0), RID_TMP); | |
| 1323 val = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, obj)); | |
| 1324 emit_lso(as, A64I_LDRB, tmp, obj, | |
| 1325 (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)); | |
| 1326 emit_lso(as, A64I_LDRB, RID_TMP, val, (int32_t)offsetof(GChead, marked)); | |
| 1327 } | |
| 1328 | |
| 1329 /* -- Arithmetic and logic operations ------------------------------------- */ | |
| 1330 | |
| 1331 static void asm_fparith(ASMState *as, IRIns *ir, A64Ins ai) | |
| 1332 { | |
| 1333 Reg dest = ra_dest(as, ir, RSET_FPR); | |
| 1334 Reg right, left = ra_alloc2(as, ir, RSET_FPR); | |
| 1335 right = (left >> 8); left &= 255; | |
| 1336 emit_dnm(as, ai, (dest & 31), (left & 31), (right & 31)); | |
| 1337 } | |
| 1338 | |
| 1339 static void asm_fpunary(ASMState *as, IRIns *ir, A64Ins ai) | |
| 1340 { | |
| 1341 Reg dest = ra_dest(as, ir, RSET_FPR); | |
| 1342 Reg left = ra_hintalloc(as, ir->op1, dest, RSET_FPR); | |
| 1343 emit_dn(as, ai, (dest & 31), (left & 31)); | |
| 1344 } | |
| 1345 | |
| 1346 static void asm_fpmath(ASMState *as, IRIns *ir) | |
| 1347 { | |
| 1348 IRFPMathOp fpm = (IRFPMathOp)ir->op2; | |
| 1349 if (fpm == IRFPM_SQRT) { | |
| 1350 asm_fpunary(as, ir, A64I_FSQRTd); | |
| 1351 } else if (fpm <= IRFPM_TRUNC) { | |
| 1352 asm_fpunary(as, ir, fpm == IRFPM_FLOOR ? A64I_FRINTMd : | |
| 1353 fpm == IRFPM_CEIL ? A64I_FRINTPd : A64I_FRINTZd); | |
| 1354 } else { | |
| 1355 asm_callid(as, ir, IRCALL_lj_vm_floor + fpm); | |
| 1356 } | |
| 1357 } | |
| 1358 | |
| 1359 static int asm_swapops(ASMState *as, IRRef lref, IRRef rref) | |
| 1360 { | |
| 1361 IRIns *ir; | |
| 1362 if (irref_isk(rref)) | |
| 1363 return 0; /* Don't swap constants to the left. */ | |
| 1364 if (irref_isk(lref)) | |
| 1365 return 1; /* But swap constants to the right. */ | |
| 1366 ir = IR(rref); | |
| 1367 if ((ir->o >= IR_BSHL && ir->o <= IR_BSAR) || | |
| 1368 (ir->o == IR_ADD && ir->op1 == ir->op2) || | |
| 1369 (ir->o == IR_CONV && ir->op2 == ((IRT_I64<<IRCONV_DSH)|IRT_INT|IRCONV_SEXT))) | |
| 1370 return 0; /* Don't swap fusable operands to the left. */ | |
| 1371 ir = IR(lref); | |
| 1372 if ((ir->o >= IR_BSHL && ir->o <= IR_BSAR) || | |
| 1373 (ir->o == IR_ADD && ir->op1 == ir->op2) || | |
| 1374 (ir->o == IR_CONV && ir->op2 == ((IRT_I64<<IRCONV_DSH)|IRT_INT|IRCONV_SEXT))) | |
| 1375 return 1; /* But swap fusable operands to the right. */ | |
| 1376 return 0; /* Otherwise don't swap. */ | |
| 1377 } | |
| 1378 | |
static void asm_intop(ASMState *as, IRIns *ir, A64Ins ai)
{
  IRRef lref = ir->op1, rref = ir->op2;
  Reg left, dest = ra_dest(as, ir, RSET_GPR);
  uint32_t m;
  if ((ai & ~A64I_S) != A64I_SUBw && asm_swapops(as, lref, rref)) {
    IRRef tmp = lref; lref = rref; rref = tmp;
  }
  left = ra_hintalloc(as, lref, dest, RSET_GPR);
  if (irt_is64(ir->t)) ai |= A64I_X;
  m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left));
  if (irt_isguard(ir->t)) {  /* For IR_ADDOV etc. */
    asm_guardcc(as, CC_VS);
    ai |= A64I_S;
  }
  emit_dn(as, ai^m, dest, left);
}

static void asm_intop_s(ASMState *as, IRIns *ir, A64Ins ai)
{
  if (as->flagmcp == as->mcp) {  /* Drop cmp r, #0. */
    as->flagmcp = NULL;
    as->mcp++;
    ai |= A64I_S;
  }
  asm_intop(as, ir, ai);
}

static void asm_intneg(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  emit_dm(as, irt_is64(ir->t) ? A64I_NEGx : A64I_NEGw, dest, left);
}

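/*
** For IR_MULOV the emitted sequence is, in final (forward) order, roughly:
**
**   smull  xdest, wn, wm           ; 64-bit product of the 32-bit operands
**   asr    xTMP, xdest, #32        ; high half of the product
**   cmp    wTMP, wdest, asr #31    ; equal to sign-extension of low half?
**   mov    wdest, wdest            ; zero-extend the 32-bit result
**   b.ne   ->exit                  ; no: signed overflow, take the guard
**
** Remember the emit_* calls below appear in reverse of this order.
*/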
/* NYI: use add/shift for MUL(OV) with constants. FOLD only does 2^k. */
static void asm_intmul(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, rset_exclude(RSET_GPR, dest));
  Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  if (irt_isguard(ir->t)) {  /* IR_MULOV */
    asm_guardcc(as, CC_NE);
    emit_dm(as, A64I_MOVw, dest, dest);  /* Zero-extend. */
    emit_nm(as, A64I_CMPw | A64F_SH(A64SH_ASR, 31), RID_TMP, dest);
    emit_dn(as, A64I_ASRx | A64F_IMMR(32), RID_TMP, dest);
    emit_dnm(as, A64I_SMULL, dest, right, left);
  } else {
    emit_dnm(as, irt_is64(ir->t) ? A64I_MULx : A64I_MULw, dest, left, right);
  }
}

static void asm_add(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t)) {
    if (!asm_fusemadd(as, ir, A64I_FMADDd, A64I_FMADDd))
      asm_fparith(as, ir, A64I_FADDd);
    return;
  }
  asm_intop_s(as, ir, A64I_ADDw);
}

static void asm_sub(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t)) {
    if (!asm_fusemadd(as, ir, A64I_FNMSUBd, A64I_FMSUBd))
      asm_fparith(as, ir, A64I_FSUBd);
    return;
  }
  asm_intop_s(as, ir, A64I_SUBw);
}

static void asm_mul(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t)) {
    asm_fparith(as, ir, A64I_FMULd);
    return;
  }
  asm_intmul(as, ir);
}

#define asm_addov(as, ir)	asm_add(as, ir)
#define asm_subov(as, ir)	asm_sub(as, ir)
#define asm_mulov(as, ir)	asm_mul(as, ir)

#define asm_fpdiv(as, ir)	asm_fparith(as, ir, A64I_FDIVd)
#define asm_abs(as, ir)		asm_fpunary(as, ir, A64I_FABS)

static void asm_neg(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t)) {
    asm_fpunary(as, ir, A64I_FNEGd);
    return;
  }
  asm_intneg(as, ir);
}

static void asm_band(ASMState *as, IRIns *ir)
{
  A64Ins ai = A64I_ANDw;
  if (asm_fuseandshift(as, ir))
    return;
  if (as->flagmcp == as->mcp) {
    /* Try to drop cmp r, #0. */
    as->flagmcp = NULL;
    as->mcp++;
    ai = A64I_ANDSw;
  }
  asm_intop(as, ir, ai);
}

static void asm_borbxor(ASMState *as, IRIns *ir, A64Ins ai)
{
  IRRef lref = ir->op1, rref = ir->op2;
  IRIns *irl = IR(lref), *irr = IR(rref);
  if ((canfuse(as, irl) && irl->o == IR_BNOT && !irref_isk(rref)) ||
      (canfuse(as, irr) && irr->o == IR_BNOT && !irref_isk(lref))) {
    Reg left, dest = ra_dest(as, ir, RSET_GPR);
    uint32_t m;
    if (irl->o == IR_BNOT) {
      IRRef tmp = lref; lref = rref; rref = tmp;
    }
    left = ra_alloc1(as, lref, RSET_GPR);
    ai |= A64I_ON;
    if (irt_is64(ir->t)) ai |= A64I_X;
    m = asm_fuseopm(as, ai, IR(rref)->op1, rset_exclude(RSET_GPR, left));
    emit_dn(as, ai^m, dest, left);
  } else {
    asm_intop(as, ir, ai);
  }
}

static void asm_bor(ASMState *as, IRIns *ir)
{
  if (asm_fuseorshift(as, ir))
    return;
  asm_borbxor(as, ir, A64I_ORRw);
}

#define asm_bxor(as, ir)	asm_borbxor(as, ir, A64I_EORw)

static void asm_bnot(ASMState *as, IRIns *ir)
{
  A64Ins ai = A64I_MVNw;
  Reg dest = ra_dest(as, ir, RSET_GPR);
  uint32_t m = asm_fuseopm(as, ai, ir->op1, RSET_GPR);
  if (irt_is64(ir->t)) ai |= A64I_X;
  emit_d(as, ai^m, dest);
}

static void asm_bswap(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
  emit_dn(as, irt_is64(ir->t) ? A64I_REVx : A64I_REVw, dest, left);
}

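/*
** Constant shifts are encoded as bitfield moves (UBFM/SBFM) or EXTR for
** rotates, and a constant BSHL feeding a constant BSHR/BSAR is fused into
** a single bitfield extract. A minimal 32-bit example:
**
**   (x << 8) >> 16   ==>   ubfm w(dest), w(x), #8, #23  ; aka ubfx #8, #16
**
** i.e. shift2 = 8 narrows shmask to 23 and the effective immr becomes
** (16-8) & 31 = 8, which extracts bits 8..23 of x.
*/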
static void asm_bitshift(ASMState *as, IRIns *ir, A64Ins ai, A64Shift sh)
{
  int32_t shmask = irt_is64(ir->t) ? 63 : 31;
  if (irref_isk(ir->op2)) {  /* Constant shifts. */
    Reg left, dest = ra_dest(as, ir, RSET_GPR);
    int32_t shift = (IR(ir->op2)->i & shmask);
    IRIns *irl = IR(ir->op1);
    if (shmask == 63) ai += A64I_UBFMx - A64I_UBFMw;

    /* Fuse BSHL + BSHR/BSAR into UBFM/SBFM aka UBFX/SBFX/UBFIZ/SBFIZ. */
    if ((sh == A64SH_LSR || sh == A64SH_ASR) && canfuse(as, irl)) {
      if (irl->o == IR_BSHL && irref_isk(irl->op2)) {
	int32_t shift2 = (IR(irl->op2)->i & shmask);
	shift = ((shift - shift2) & shmask);
	shmask -= shift2;
	ir = irl;
      }
    }

    left = ra_alloc1(as, ir->op1, RSET_GPR);
    switch (sh) {
    case A64SH_LSL:
      emit_dn(as, ai | A64F_IMMS(shmask-shift) |
		  A64F_IMMR((shmask-shift+1)&shmask), dest, left);
      break;
    case A64SH_LSR: case A64SH_ASR:
      emit_dn(as, ai | A64F_IMMS(shmask) | A64F_IMMR(shift), dest, left);
      break;
    case A64SH_ROR:
      emit_dnm(as, ai | A64F_IMMS(shift), dest, left, left);
      break;
    }
  } else {  /* Variable-length shifts. */
    Reg dest = ra_dest(as, ir, RSET_GPR);
    Reg left = ra_alloc1(as, ir->op1, RSET_GPR);
    Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
    emit_dnm(as, (shmask == 63 ? A64I_SHRx : A64I_SHRw) | A64F_BSH(sh),
	     dest, left, right);
  }
}

#define asm_bshl(as, ir)	asm_bitshift(as, ir, A64I_UBFMw, A64SH_LSL)
#define asm_bshr(as, ir)	asm_bitshift(as, ir, A64I_UBFMw, A64SH_LSR)
#define asm_bsar(as, ir)	asm_bitshift(as, ir, A64I_SBFMw, A64SH_ASR)
#define asm_bror(as, ir)	asm_bitshift(as, ir, A64I_EXTRw, A64SH_ROR)
#define asm_brol(as, ir)	lj_assertA(0, "unexpected BROL")

static void asm_intmin_max(ASMState *as, IRIns *ir, A64CC cc)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg left = ra_hintalloc(as, ir->op1, dest, RSET_GPR);
  Reg right = ra_alloc1(as, ir->op2, rset_exclude(RSET_GPR, left));
  emit_dnm(as, A64I_CSELw|A64F_CC(cc), dest, left, right);
  emit_nm(as, A64I_CMPw, left, right);
}

static void asm_fpmin_max(ASMState *as, IRIns *ir, A64CC fcc)
{
  Reg dest = (ra_dest(as, ir, RSET_FPR) & 31);
  Reg right, left = ra_alloc2(as, ir, RSET_FPR);
  right = ((left >> 8) & 31); left &= 31;
  emit_dnm(as, A64I_FCSELd | A64F_CC(fcc), dest, right, left);
  emit_nm(as, A64I_FCMPd, left, right);
}

static void asm_min_max(ASMState *as, IRIns *ir, A64CC cc, A64CC fcc)
{
  if (irt_isnum(ir->t))
    asm_fpmin_max(as, ir, fcc);
  else
    asm_intmin_max(as, ir, cc);
}

#define asm_min(as, ir)		asm_min_max(as, ir, CC_LT, CC_PL)
#define asm_max(as, ir)		asm_min_max(as, ir, CC_GT, CC_LE)

/* -- Comparisons --------------------------------------------------------- */

/* Map of comparisons to flags. ORDER IR. */
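/*
** Each entry packs two already-negated condition codes: the low nibble is
** the integer guard condition, the high nibble the FP guard condition.
** They are negated because a guard branches to the exit stub when the
** comparison *fails*. Entries marked "x" below also swap the FP operands.
*/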
static const uint8_t asm_compmap[IR_ABC+1] = {
  /* op  FP swp  int cc   FP cc */
  /* LT       */ CC_GE + (CC_HS << 4),
  /* GE    x  */ CC_LT + (CC_HI << 4),
  /* LE       */ CC_GT + (CC_HI << 4),
  /* GT    x  */ CC_LE + (CC_HS << 4),
  /* ULT   x  */ CC_HS + (CC_LS << 4),
  /* UGE      */ CC_LO + (CC_LO << 4),
  /* ULE   x  */ CC_HI + (CC_LO << 4),
  /* UGT      */ CC_LS + (CC_LS << 4),
  /* EQ       */ CC_NE + (CC_NE << 4),
  /* NE       */ CC_EQ + (CC_EQ << 4),
  /* ABC      */ CC_LS + (CC_LS << 4)  /* Same as UGT. */
};

/* FP comparisons. */
static void asm_fpcomp(ASMState *as, IRIns *ir)
{
  Reg left, right;
  A64Ins ai;
  int swp = ((ir->o ^ (ir->o >> 2)) & ~(ir->o >> 3) & 1);
  if (!swp && irref_isk(ir->op2) && ir_knum(IR(ir->op2))->u64 == 0) {
    left = (ra_alloc1(as, ir->op1, RSET_FPR) & 31);
    right = 0;
    ai = A64I_FCMPZd;
  } else {
    left = ra_alloc2(as, ir, RSET_FPR);
    if (swp) {
      right = (left & 31); left = ((left >> 8) & 31);
    } else {
      right = ((left >> 8) & 31); left &= 31;
    }
    ai = A64I_FCMPd;
  }
  asm_guardcc(as, (asm_compmap[ir->o] >> 4));
  emit_nm(as, ai, left, right);
}

/* Integer comparisons. */
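/*
** Beyond a plain cmp+guard, several combinations are peephole-fused here:
**   BAND x, 2^k  ==/!=  0   -->  tbz/tbnz x, #k, ->exit
**   BAND x, y    ==/!=  0   -->  tst x, y; b.cc ->exit
**   x == 0 / x != 0         -->  cbz/cbnz x, ->exit
** When operands are swapped, cc ^= 7 flips LT<->GT and LE<->GE, while
** cc ^= 11 flips the unsigned conditions LO<->HI and LS<->HS.
*/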
static void asm_intcomp(ASMState *as, IRIns *ir)
{
  A64CC oldcc, cc = (asm_compmap[ir->o] & 15);
  A64Ins ai = irt_is64(ir->t) ? A64I_CMPx : A64I_CMPw;
  IRRef lref = ir->op1, rref = ir->op2;
  Reg left;
  uint32_t m;
  int cmpprev0 = 0;
  lj_assertA(irt_is64(ir->t) || irt_isint(ir->t) ||
	     irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t),
	     "bad comparison data type %d", irt_type(ir->t));
  if (asm_swapops(as, lref, rref)) {
    IRRef tmp = lref; lref = rref; rref = tmp;
    if (cc >= CC_GE) cc ^= 7;  /* LT <-> GT, LE <-> GE */
    else if (cc > CC_NE) cc ^= 11;  /* LO <-> HI, LS <-> HS */
  }
  oldcc = cc;
  if (irref_isk(rref) && get_k64val(as, rref) == 0) {
    IRIns *irl = IR(lref);
    if (cc == CC_GE) cc = CC_PL;
    else if (cc == CC_LT) cc = CC_MI;
    else if (cc > CC_NE) goto nocombine;  /* Other conds don't work with tst. */
    cmpprev0 = (irl+1 == ir);
    /* Combine and-cmp-bcc into tbz/tbnz or and-cmp into tst. */
    if (cmpprev0 && irl->o == IR_BAND && !ra_used(irl)) {
      IRRef blref = irl->op1, brref = irl->op2;
      uint32_t m2 = 0;
      Reg bleft;
      if (asm_swapops(as, blref, brref)) {
	Reg tmp = blref; blref = brref; brref = tmp;
      }
      if (irref_isk(brref)) {
	uint64_t k = get_k64val(as, brref);
	if (k && !(k & (k-1)) && (cc == CC_EQ || cc == CC_NE)) {
	  asm_guardtnb(as, cc == CC_EQ ? A64I_TBZ : A64I_TBNZ,
		       ra_alloc1(as, blref, RSET_GPR), emit_ctz64(k));
	  return;
	}
	m2 = emit_isk13(k, irt_is64(irl->t));
      }
      bleft = ra_alloc1(as, blref, RSET_GPR);
      ai = (irt_is64(irl->t) ? A64I_TSTx : A64I_TSTw);
      if (!m2)
	m2 = asm_fuseopm(as, ai, brref, rset_exclude(RSET_GPR, bleft));
      asm_guardcc(as, cc);
      emit_n(as, ai^m2, bleft);
      return;
    }
    if (cc == CC_EQ || cc == CC_NE) {
      /* Combine cmp-bcc into cbz/cbnz. */
      ai = cc == CC_EQ ? A64I_CBZ : A64I_CBNZ;
      if (irt_is64(ir->t)) ai |= A64I_X;
      asm_guardcnb(as, ai, ra_alloc1(as, lref, RSET_GPR));
      return;
    }
  }
nocombine:
  left = ra_alloc1(as, lref, RSET_GPR);
  m = asm_fuseopm(as, ai, rref, rset_exclude(RSET_GPR, left));
  asm_guardcc(as, cc);
  emit_n(as, ai^m, left);
  /* Signed comparison with zero and referencing previous ins? */
  if (cmpprev0 && (oldcc <= CC_NE || oldcc >= CC_GE))
    as->flagmcp = as->mcp;  /* Allow elimination of the compare. */
}

static void asm_comp(ASMState *as, IRIns *ir)
{
  if (irt_isnum(ir->t))
    asm_fpcomp(as, ir);
  else
    asm_intcomp(as, ir);
}

#define asm_equal(as, ir)	asm_comp(as, ir)

/* -- Split register ops -------------------------------------------------- */

/* Hiword op of a split 64/64 bit op. Previous op is the loword op. */
static void asm_hiop(ASMState *as, IRIns *ir)
{
  /* HIOP is marked as a store because it needs its own DCE logic. */
  int uselo = ra_used(ir-1), usehi = ra_used(ir);  /* Loword/hiword used? */
  if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1;
  if (!usehi) return;  /* Skip unused hiword op for all remaining ops. */
  switch ((ir-1)->o) {
  case IR_CALLN:
  case IR_CALLL:
  case IR_CALLS:
  case IR_CALLXS:
    if (!uselo)
      ra_allocref(as, ir->op1, RID2RSET(RID_RETLO));  /* Mark lo op as used. */
    break;
  default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break;
  }
}

/* -- Profiling ----------------------------------------------------------- */

static void asm_prof(ASMState *as, IRIns *ir)
{
  uint32_t k = emit_isk13(HOOK_PROFILE, 0);
  lj_assertA(k != 0, "HOOK_PROFILE does not fit in K13");
  UNUSED(ir);
  asm_guardcc(as, CC_NE);
  emit_n(as, A64I_TSTw^k, RID_TMP);
  emit_lsptr(as, A64I_LDRB, RID_TMP, (void *)&J2G(as->J)->hookmask);
}

/* -- Stack handling ------------------------------------------------------ */

/* Check Lua stack size for overflow. Use exit handler as fallback. */
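/*
** In final (forward) order this emits, for a root trace, roughly:
**
**   ldr   xTMP, [GL, #offsetof(global_State, cur_L)]
**   ldr   xTMP, [xTMP, #offsetof(lua_State, maxstack)]
**   sub   xTMP, xTMP, xBASE
**   cmp   xTMP, #8*topslot
**   b.ls  ->exit                   ; not enough stack: exit to handler
**
** Side traces (irp != NULL) additionally reload a spilled parent BASE or
** save/restore a temporary register, since the head of a side trace must
** not spill arbitrary registers.
*/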
static void asm_stack_check(ASMState *as, BCReg topslot,
			    IRIns *irp, RegSet allow, ExitNo exitno)
{
  Reg pbase;
  uint32_t k;
  if (irp) {
    if (!ra_hasspill(irp->s)) {
      pbase = irp->r;
      lj_assertA(ra_hasreg(pbase), "base reg lost");
    } else if (allow) {
      pbase = rset_pickbot(allow);
    } else {
      pbase = RID_RET;
      emit_lso(as, A64I_LDRx, RID_RET, RID_SP, 0);  /* Restore temp register. */
    }
  } else {
    pbase = RID_BASE;
  }
  emit_cond_branch(as, CC_LS, asm_exitstub_addr(as, exitno));
  k = emit_isk12((8*topslot));
  lj_assertA(k, "slot offset %d does not fit in K12", 8*topslot);
  emit_n(as, A64I_CMPx^k, RID_TMP);
  emit_dnm(as, A64I_SUBx, RID_TMP, RID_TMP, pbase);
  emit_lso(as, A64I_LDRx, RID_TMP, RID_TMP,
	   (int32_t)offsetof(lua_State, maxstack));
  if (irp) {  /* Must not spill arbitrary registers in head of side trace. */
    if (ra_hasspill(irp->s))
      emit_lso(as, A64I_LDRx, pbase, RID_SP, sps_scale(irp->s));
    emit_lso(as, A64I_LDRx, RID_TMP, RID_GL, glofs(as, &J2G(as->J)->cur_L));
    if (ra_hasspill(irp->s) && !allow)
      emit_lso(as, A64I_STRx, RID_RET, RID_SP, 0);  /* Save temp register. */
  } else {
    emit_getgl(as, RID_TMP, cur_L);
  }
}

/* Restore Lua stack from on-trace state. */
static void asm_stack_restore(ASMState *as, SnapShot *snap)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
#ifdef LUA_USE_ASSERT
  SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1-LJ_FR2];
#endif
  MSize n, nent = snap->nent;
  /* Store the value of all modified slots to the Lua stack. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    int32_t ofs = 8*((int32_t)s-1-LJ_FR2);
    IRRef ref = snap_ref(sn);
    IRIns *ir = IR(ref);
    if ((sn & SNAP_NORESTORE))
      continue;
    if ((sn & SNAP_KEYINDEX)) {
      RegSet allow = rset_exclude(RSET_GPR, RID_BASE);
      Reg r = irref_isk(ref) ? ra_allock(as, ir->i, allow) :
			       ra_alloc1(as, ref, allow);
      rset_clear(allow, r);
      emit_lso(as, A64I_STRw, r, RID_BASE, ofs);
      emit_lso(as, A64I_STRw, ra_allock(as, LJ_KEYINDEX, allow),
	       RID_BASE, ofs+4);
    } else if (irt_isnum(ir->t)) {
      Reg src = ra_alloc1(as, ref, RSET_FPR);
      emit_lso(as, A64I_STRd, (src & 31), RID_BASE, ofs);
    } else {
      asm_tvstore64(as, RID_BASE, ofs, ref);
    }
    checkmclim(as);
  }
  lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
}

/* -- GC handling --------------------------------------------------------- */

/* Marker to prevent patching the GC check exit. */
#define ARM64_NOPATCH_GC_CHECK \
  (A64I_ORRx|A64F_D(RID_TMP)|A64F_M(RID_TMP)|A64F_N(RID_TMP))

/* Check GC threshold and do one or more GC steps. */
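/*
** Forward order of the emitted check (the emit_* calls below are
** reversed; register names illustrative):
**
**   ldr   xTMP, [GL, #offsetof(global_State, gc.total)]
**   ldr   xtmp2, [GL, #offsetof(global_State, gc.threshold)]
**   cmp   xTMP, xtmp2
**   b.ls  ->l_end                  ; total <= threshold: skip GC step
**   mov   wtmp2, #gcsteps ; mov x?, GL ; bl lj_gc_step_jit
**   orr   xTMP, xTMP, xTMP         ; ARM64_NOPATCH_GC_CHECK marker
**   cbnz  wRET, ->exit             ; in GCSatomic/GCSfinalize: exit trace
** ->l_end:
**
** The orr marker keeps lj_asm_patchexit() from retargeting this exit.
*/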
static void asm_gc_check(ASMState *as)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
  IRRef args[2];
  MCLabel l_end;
  Reg tmp2;
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
  asm_guardcnb(as, A64I_CBNZ, RID_RET);  /* Assumes asm_snap_prep() is done. */
  *--as->mcp = ARM64_NOPATCH_GC_CHECK;
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ASMREF_TMP2;  /* MSize steps     */
  asm_gencall(as, ci, args);
  emit_dm(as, A64I_MOVx, ra_releasetmp(as, ASMREF_TMP1), RID_GL);
  tmp2 = ra_releasetmp(as, ASMREF_TMP2);
  emit_loadi(as, tmp2, as->gcsteps);
  /* Jump around GC step if GC total < GC threshold. */
  emit_cond_branch(as, CC_LS, l_end);
  emit_nm(as, A64I_CMPx, RID_TMP, tmp2);
  emit_getgl(as, tmp2, gc.threshold);
  emit_getgl(as, RID_TMP, gc.total);
  as->gcsteps = 0;
  checkmclim(as);
}

/* -- Loop handling ------------------------------------------------------- */

/* Fixup the loop branch. */
static void asm_loop_fixup(ASMState *as)
{
  MCode *p = as->mctop;
  MCode *target = as->mcp;
  if (as->loopinv) {  /* Inverted loop branch? */
    uint32_t mask = (p[-2] & 0x7e000000) == 0x36000000 ? 0x3fffu : 0x7ffffu;
    ptrdiff_t delta = target - (p - 2);
    /* asm_guard* already inverted the bcc/tnb/cnb and patched the final b. */
    p[-2] |= ((uint32_t)delta & mask) << 5;
  } else {
    ptrdiff_t delta = target - (p - 1);
    p[-1] = A64I_B | A64F_S26(delta);
  }
}

/* Fixup the tail of the loop. */
static void asm_loop_tail_fixup(ASMState *as)
{
  UNUSED(as);  /* Nothing to do. */
}

/* -- Head of trace ------------------------------------------------------- */

/* Reload L register from g->cur_L. */
static void asm_head_lreg(ASMState *as)
{
  IRIns *ir = IR(ASMREF_L);
  if (ra_used(ir)) {
    Reg r = ra_dest(as, ir, RSET_GPR);
    emit_getgl(as, r, cur_L);
    ra_evictk(as);
  }
}

/* Coalesce BASE register for a root trace. */
static void asm_head_root_base(ASMState *as)
{
  IRIns *ir;
  asm_head_lreg(as);
  ir = IR(REF_BASE);
  if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
    ra_spill(as, ir);
  ra_destreg(as, ir, RID_BASE);
}

/* Coalesce BASE register for a side trace. */
static Reg asm_head_side_base(ASMState *as, IRIns *irp)
{
  IRIns *ir;
  asm_head_lreg(as);
  ir = IR(REF_BASE);
  if (ra_hasreg(ir->r) && (rset_test(as->modset, ir->r) || irt_ismarked(ir->t)))
    ra_spill(as, ir);
  if (ra_hasspill(irp->s)) {
    return ra_dest(as, ir, RSET_GPR);
  } else {
    Reg r = irp->r;
    lj_assertA(ra_hasreg(r), "base reg lost");
    if (r != ir->r && !rset_test(as->freeset, r))
      ra_restore(as, regcost_ref(as->cost[r]));
    ra_destreg(as, ir, r);
    return r;
  }
}

/* -- Tail of trace ------------------------------------------------------- */

/* Fixup the tail code. */
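/*
** asm_tail_prep() below reserves the last instruction slots of the trace:
** p[-1] holds the final branch, either to a linked trace or back to the
** interpreter via lj_vm_exit_interp, and p[-2] the stack-pointer
** adjustment. When no adjustment is needed, the spare slot is reclaimed
** and the trace is shortened by one instruction.
*/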
static void asm_tail_fixup(ASMState *as, TraceNo lnk)
{
  MCode *p = as->mctop;
  MCode *target;
  /* Undo the sp adjustment in BC_JLOOP when exiting to the interpreter. */
  int32_t spadj = as->T->spadjust + (lnk ? 0 : sps_scale(SPS_FIXED));
  if (spadj == 0) {
    *--p = A64I_LE(A64I_NOP);
    as->mctop = p;
  } else {
    /* Patch stack adjustment. */
    uint32_t k = emit_isk12(spadj);
    lj_assertA(k, "stack adjustment %d does not fit in K12", spadj);
    p[-2] = (A64I_ADDx^k) | A64F_D(RID_SP) | A64F_N(RID_SP);
  }
  /* Patch exit branch. */
  target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
  p[-1] = A64I_B | A64F_S26((target-p)+1);
}

/* Prepare tail of code. */
static void asm_tail_prep(ASMState *as)
{
  MCode *p = as->mctop - 1;  /* Leave room for exit branch. */
  if (as->loopref) {
    as->invmcp = as->mcp = p;
  } else {
    as->mcp = p-1;  /* Leave room for stack pointer adjustment. */
    as->invmcp = NULL;
  }
  *p = 0;  /* Prevent load/store merging. */
}

/* -- Trace setup --------------------------------------------------------- */

/* Ensure there are enough stack slots for call arguments. */
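/*
** Each argument that doesn't fit into the REGARG_NUMGPR integer or
** REGARG_NUMFPR FP argument registers is passed on the stack and
** reserves two spill slots (stack args are 8 bytes, spill slots 4,
** cf. sps_scale()).
*/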
static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  IRRef args[CCI_NARGS_MAX*2];
  uint32_t i, nargs = CCI_XNARGS(ci);
  int nslots = 0, ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
  asm_collectargs(as, ir, ci, args);
  for (i = 0; i < nargs; i++) {
    if (args[i] && irt_isfp(IR(args[i])->t)) {
      if (nfpr > 0) nfpr--; else nslots += 2;
    } else {
      if (ngpr > 0) ngpr--; else nslots += 2;
    }
  }
  if (nslots > as->evenspill)  /* Leave room for args in stack slots. */
    as->evenspill = nslots;
  return REGSP_HINT(RID_RET);
}

static void asm_setup_target(ASMState *as)
{
  /* May need extra exit for asm_stack_check on side traces. */
  asm_exitstub_setup(as, as->T->nsnap + (as->parent ? 1 : 0));
}

#if LJ_BE
/* ARM64 instructions are always little-endian. Swap for ARM64BE. */
static void asm_mcode_fixup(MCode *mcode, MSize size)
{
  MCode *pe = (MCode *)((char *)mcode + size);
  while (mcode < pe) {
    MCode ins = *mcode;
    *mcode++ = lj_bswap(ins);
  }
}
#define LJ_TARGET_MCODE_FIXUP	1
#endif

/* -- Trace patching ------------------------------------------------------ */

/* Patch exit jumps of existing machine code to a new target. */
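/*
** The whole trace is scanned for branches whose encoded target is this
** exit's stub: conditional branches (b.cc), plain b, cbz/cbnz and
** tbz/tbnz. Matches within branch range are redirected to the new target;
** the stub's own long-range b is patched last, so out-of-range short
** branches still reach the target via the stub. A cbz/cbnz preceded by
** the ARM64_NOPATCH_GC_CHECK marker is left untouched, and in that case
** the stub's branch is also left unpatched, so the GC check keeps
** exiting through the original handler.
*/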
void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
{
  MCode *p = T->mcode;
  MCode *pe = (MCode *)((char *)p + T->szmcode);
  MCode *cstart = NULL;
  MCode *mcarea = lj_mcode_patch(J, p, 0);
  MCode *px = exitstub_trace_addr(T, exitno);
  int patchlong = 1;
  /* Note: this assumes a trace exit is only ever patched once. */
  for (; p < pe; p++) {
    /* Look for exitstub branch, replace with branch to target. */
    ptrdiff_t delta = target - p;
    MCode ins = A64I_LE(*p);
    if ((ins & 0xff000000u) == 0x54000000u &&
	((ins ^ ((px-p)<<5)) & 0x00ffffe0u) == 0) {
      /* Patch bcc, if within range. */
      if (A64F_S_OK(delta, 19)) {
	*p = A64I_LE((ins & 0xff00001fu) | A64F_S19(delta));
	if (!cstart) cstart = p;
      }
    } else if ((ins & 0xfc000000u) == 0x14000000u &&
	       ((ins ^ (px-p)) & 0x03ffffffu) == 0) {
      /* Patch b. */
      lj_assertJ(A64F_S_OK(delta, 26), "branch target out of range");
      *p = A64I_LE((ins & 0xfc000000u) | A64F_S26(delta));
      if (!cstart) cstart = p;
    } else if ((ins & 0x7e000000u) == 0x34000000u &&
	       ((ins ^ ((px-p)<<5)) & 0x00ffffe0u) == 0) {
      /* Patch cbz/cbnz, if within range. */
      if (p[-1] == ARM64_NOPATCH_GC_CHECK) {
	patchlong = 0;
      } else if (A64F_S_OK(delta, 19)) {
	*p = A64I_LE((ins & 0xff00001fu) | A64F_S19(delta));
	if (!cstart) cstart = p;
      }
    } else if ((ins & 0x7e000000u) == 0x36000000u &&
	       ((ins ^ ((px-p)<<5)) & 0x0007ffe0u) == 0) {
      /* Patch tbz/tbnz, if within range. */
      if (A64F_S_OK(delta, 14)) {
	*p = A64I_LE((ins & 0xfff8001fu) | A64F_S14(delta));
	if (!cstart) cstart = p;
      }
    }
  }
  /* Always patch long-range branch in exit stub itself. Except, if we can't. */
  if (patchlong) {
    ptrdiff_t delta = target - px;
    lj_assertJ(A64F_S_OK(delta, 26), "branch target out of range");
    *px = A64I_B | A64F_S26(delta);
    if (!cstart) cstart = px;
  }
  if (cstart) lj_mcode_sync(cstart, px+1);
  lj_mcode_patch(J, mcarea, 1);
}