Mercurial
Comparison of third_party/luajit/src/lj_asm_x86.h @ 178:94705b5986b3
[ThirdParty] Added WRK and luajit for load testing.
| author | MrJuneJune <me@mrjunejune.com> |
|---|---|
| date | Thu, 22 Jan 2026 20:10:30 -0800 |
| parents | |
| children | |
Compared revisions:

| 177:24fe8ff94056 (parent changeset) | 178:94705b5986b3 (this changeset) |
|---|---|

The file is new in this changeset, so the comparison shows its full contents as a single insertion:
```c
/*
** x86/x64 IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2023 Mike Pall. See Copyright Notice in luajit.h
*/

/* -- Guard handling ------------------------------------------------------ */

/* Generate an exit stub group at the bottom of the reserved MCode memory. */
static MCode *asm_exitstub_gen(ASMState *as, ExitNo group)
{
  ExitNo i, groupofs = (group*EXITSTUBS_PER_GROUP) & 0xff;
  MCode *mxp = as->mcbot;
  MCode *mxpstart = mxp;
  if (mxp + (2+2)*EXITSTUBS_PER_GROUP+8+5 >= as->mctop)
    asm_mclimit(as);
  /* Push low byte of exitno for each exit stub. */
  *mxp++ = XI_PUSHi8; *mxp++ = (MCode)groupofs;
  for (i = 1; i < EXITSTUBS_PER_GROUP; i++) {
    *mxp++ = XI_JMPs; *mxp++ = (MCode)((2+2)*(EXITSTUBS_PER_GROUP - i) - 2);
    *mxp++ = XI_PUSHi8; *mxp++ = (MCode)(groupofs + i);
  }
  /* Push the high byte of the exitno for each exit stub group. */
  *mxp++ = XI_PUSHi8; *mxp++ = (MCode)((group*EXITSTUBS_PER_GROUP)>>8);
#if !LJ_GC64
  /* Store DISPATCH at original stack slot 0. Account for the two push ops. */
  *mxp++ = XI_MOVmi;
  *mxp++ = MODRM(XM_OFS8, 0, RID_ESP);
  *mxp++ = MODRM(XM_SCALE1, RID_ESP, RID_ESP);
  *mxp++ = 2*sizeof(void *);
  *(int32_t *)mxp = ptr2addr(J2GG(as->J)->dispatch); mxp += 4;
#endif
  /* Jump to exit handler which fills in the ExitState. */
  *mxp++ = XI_JMP; mxp += 4;
  *((int32_t *)(mxp-4)) = jmprel(as->J, mxp, (MCode *)(void *)lj_vm_exit_handler);
  /* Commit the code for this group (even if assembly fails later on). */
  lj_mcode_commitbot(as->J, mxp);
  as->mcbot = mxp;
  as->mclim = as->mcbot + MCLIM_REDZONE;
  return mxpstart;
}
```
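Each stub is 4 bytes: a 2-byte `PUSH imm8` plus a 2-byte short `JMP` (hence the `(2+2)` terms), so a stub's address can be recomputed from its exit number alone. A minimal sketch of that mapping, which mirrors the `exitstub_addr()` macro used further down (the 4-byte spacing is an assumption taken from the layout above):

```c
/* Sketch: map an exit number back to its stub address.  Mirrors
** exitstub_addr() in lj_target_x86.h; the (2+2) spacing matches the
** PUSH imm8 + JMP rel8 pairs emitted by asm_exitstub_gen() above.
*/
static MCode *exitstub_addr_sketch(jit_State *J, ExitNo exitno)
{
  MCode *group = J->exitstubgroup[exitno / EXITSTUBS_PER_GROUP];
  return group + (2+2)*(exitno % EXITSTUBS_PER_GROUP);
}
```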
```c
/* Setup all needed exit stubs. */
static void asm_exitstub_setup(ASMState *as, ExitNo nexits)
{
  ExitNo i;
  if (nexits >= EXITSTUBS_PER_GROUP*LJ_MAX_EXITSTUBGR)
    lj_trace_err(as->J, LJ_TRERR_SNAPOV);
  for (i = 0; i < (nexits+EXITSTUBS_PER_GROUP-1)/EXITSTUBS_PER_GROUP; i++)
    if (as->J->exitstubgroup[i] == NULL)
      as->J->exitstubgroup[i] = asm_exitstub_gen(as, i);
}

/* Emit conditional branch to exit for guard.
** It's important to emit this *after* all registers have been allocated,
** because rematerializations may invalidate the flags.
*/
static void asm_guardcc(ASMState *as, int cc)
{
  MCode *target = exitstub_addr(as->J, as->snapno);
  MCode *p = as->mcp;
  if (LJ_UNLIKELY(p == as->invmcp)) {
    as->loopinv = 1;
    *(int32_t *)(p+1) = jmprel(as->J, p+5, target);
    target = p;
    cc ^= 1;
    if (as->realign) {
      if (LJ_GC64 && LJ_UNLIKELY(as->mrm.base == RID_RIP))
        as->mrm.ofs += 2;  /* Fixup RIP offset for pending fused load. */
      emit_sjcc(as, cc, target);
      return;
    }
  }
  if (LJ_GC64 && LJ_UNLIKELY(as->mrm.base == RID_RIP))
    as->mrm.ofs += 6;  /* Fixup RIP offset for pending fused load. */
  emit_jcc(as, cc, target);
}
```
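The `cc ^= 1` works because x86 condition codes come in complementary pairs that differ only in the lowest bit, so flipping bit 0 negates the test. A small illustration with the hardware Jcc encodings (the `CC_*` enum in lj_target_x86.h is assumed to follow these values):

```c
/* x86 condition codes pair up as (cc, cc^1) == (condition, !condition).
** Illustration using the hardware encodings:
*/
enum {
  CC_E_sketch  = 4,  CC_NE_sketch = 5,   /* ZF=1 vs ZF=0:  4 ^ 1 == 5  */
  CC_L_sketch  = 12, CC_GE_sketch = 13,  /* signed <  vs signed >=     */
  CC_LE_sketch = 14, CC_G_sketch  = 15   /* signed <= vs signed >      */
};
```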
```c
/* -- Memory operand fusion ----------------------------------------------- */

/* Limit linear search to this distance. Avoids O(n^2) behavior. */
#define CONFLICT_SEARCH_LIM	31

/* Check if a reference is a signed 32 bit constant. */
static int asm_isk32(ASMState *as, IRRef ref, int32_t *k)
{
  if (irref_isk(ref)) {
    IRIns *ir = IR(ref);
#if LJ_GC64
    if (ir->o == IR_KNULL || !irt_is64(ir->t)) {
      *k = ir->i;
      return 1;
    } else if (checki32((int64_t)ir_k64(ir)->u64)) {
      *k = (int32_t)ir_k64(ir)->u64;
      return 1;
    }
#else
    if (ir->o != IR_KINT64) {
      *k = ir->i;
      return 1;
    } else if (checki32((int64_t)ir_kint64(ir)->u64)) {
      *k = (int32_t)ir_kint64(ir)->u64;
      return 1;
    }
#endif
  }
  return 0;
}
```
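The `checki32()` test matters because most x86-64 instructions only accept immediates that are sign-extended from 32 bits. A minimal sketch of the idiom (it mirrors the `checki32()` definition in lj_def.h):

```c
/* A 64 bit constant can be used as an x86-64 immediate only if it survives
** a round-trip through int32_t, i.e. if it is sign-extendable from 32 bits.
*/
static int checki32_sketch(int64_t x)
{
  return x == (int64_t)(int32_t)x;
}
```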
```c
/* Check if there's no conflicting instruction between curins and ref.
** Also avoid fusing loads if there are multiple references.
*/
static int noconflict(ASMState *as, IRRef ref, IROp conflict, int noload)
{
  IRIns *ir = as->ir;
  IRRef i = as->curins;
  if (i > ref + CONFLICT_SEARCH_LIM)
    return 0;  /* Give up, ref is too far away. */
  while (--i > ref) {
    if (ir[i].o == conflict)
      return 0;  /* Conflict found. */
    else if (!noload && (ir[i].op1 == ref || ir[i].op2 == ref))
      return 0;
  }
  return 1;  /* Ok, no conflict. */
}

/* Fuse array base into memory operand. */
static IRRef asm_fuseabase(ASMState *as, IRRef ref)
{
  IRIns *irb = IR(ref);
  as->mrm.ofs = 0;
  if (irb->o == IR_FLOAD) {
    IRIns *ira = IR(irb->op1);
    lj_assertA(irb->op2 == IRFL_TAB_ARRAY, "expected FLOAD TAB_ARRAY");
    /* We can avoid the FLOAD of t->array for colocated arrays. */
    if (ira->o == IR_TNEW && ira->op1 <= LJ_MAX_COLOSIZE &&
        !neverfuse(as) && noconflict(as, irb->op1, IR_NEWREF, 1)) {
      as->mrm.ofs = (int32_t)sizeof(GCtab);  /* Ofs to colocated array. */
      return irb->op1;  /* Table obj. */
    }
  } else if (irb->o == IR_ADD && irref_isk(irb->op2)) {
    /* Fuse base offset (vararg load). */
    as->mrm.ofs = IR(irb->op2)->i;
    return irb->op1;
  }
  return ref;  /* Otherwise use the given array base. */
}

/* Fuse array reference into memory operand. */
static void asm_fusearef(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irx;
  lj_assertA(ir->o == IR_AREF, "expected AREF");
  as->mrm.base = (uint8_t)ra_alloc1(as, asm_fuseabase(as, ir->op1), allow);
  irx = IR(ir->op2);
  if (irref_isk(ir->op2)) {
    as->mrm.ofs += 8*irx->i;
    as->mrm.idx = RID_NONE;
  } else {
    rset_clear(allow, as->mrm.base);
    as->mrm.scale = XM_SCALE8;
    /* Fuse a constant ADD (e.g. t[i+1]) into the offset.
    ** Doesn't help much without ABCelim, but reduces register pressure.
    */
    if (!LJ_64 &&  /* Has bad effects with negative index on x64. */
        mayfuse(as, ir->op2) && ra_noreg(irx->r) &&
        irx->o == IR_ADD && irref_isk(irx->op2)) {
      as->mrm.ofs += 8*IR(irx->op2)->i;
      as->mrm.idx = (uint8_t)ra_alloc1(as, irx->op1, allow);
    } else {
      as->mrm.idx = (uint8_t)ra_alloc1(as, ir->op2, allow);
    }
  }
}
```
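The fused operand describes a single x86 SIB address of the form `[base + idx*8 + ofs]`, so an array access needs no separate address arithmetic. A hedged illustration of what the `mrm` fields express (the helper is hypothetical, for illustration only):

```c
/* What the mrm fields amount to for an AREF, as one x86 address:
**
**   t[i]    ->  movsd xmm0, [base + i*8]        ; scale = XM_SCALE8
**   t[i+1]  ->  movsd xmm0, [base + i*8 + 8]    ; constant folded into ofs
**
** Hypothetical helper showing the effective address computation:
*/
static void *effective_addr(char *base, intptr_t idx, int scale_log2,
                            int32_t ofs)
{
  return base + (idx << scale_log2) + ofs;
}
```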
```c
/* Fuse array/hash/upvalue reference into memory operand.
** Caveat: this may allocate GPRs for the base/idx registers. Be sure to
** pass the final allow mask, excluding any GPRs used for other inputs.
** In particular: 2-operand GPR instructions need to call ra_dest() first!
*/
static void asm_fuseahuref(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_noreg(ir->r)) {
    switch ((IROp)ir->o) {
    case IR_AREF:
      if (mayfuse(as, ref)) {
        asm_fusearef(as, ir, allow);
        return;
      }
      break;
    case IR_HREFK:
      if (mayfuse(as, ref)) {
        as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
        as->mrm.ofs = (int32_t)(IR(ir->op2)->op2 * sizeof(Node));
        as->mrm.idx = RID_NONE;
        return;
      }
      break;
    case IR_UREFC:
      if (irref_isk(ir->op1)) {
        GCfunc *fn = ir_kfunc(IR(ir->op1));
        GCupval *uv = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv;
#if LJ_GC64
        int64_t ofs = dispofs(as, &uv->tv);
        if (checki32(ofs) && checki32(ofs+4)) {
          as->mrm.ofs = (int32_t)ofs;
          as->mrm.base = RID_DISPATCH;
          as->mrm.idx = RID_NONE;
          return;
        }
#else
        as->mrm.ofs = ptr2addr(&uv->tv);
        as->mrm.base = as->mrm.idx = RID_NONE;
        return;
#endif
      }
      break;
    case IR_TMPREF:
#if LJ_GC64
      as->mrm.ofs = (int32_t)dispofs(as, &J2G(as->J)->tmptv);
      as->mrm.base = RID_DISPATCH;
      as->mrm.idx = RID_NONE;
#else
      as->mrm.ofs = igcptr(&J2G(as->J)->tmptv);
      as->mrm.base = as->mrm.idx = RID_NONE;
#endif
      return;
    default:
      break;
    }
  }
  as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
  as->mrm.ofs = 0;
  as->mrm.idx = RID_NONE;
}

/* Fuse FLOAD/FREF reference into memory operand. */
static void asm_fusefref(ASMState *as, IRIns *ir, RegSet allow)
{
  lj_assertA(ir->o == IR_FLOAD || ir->o == IR_FREF,
             "bad IR op %d", ir->o);
  as->mrm.idx = RID_NONE;
  if (ir->op1 == REF_NIL) {  /* FLOAD from GG_State with offset. */
#if LJ_GC64
    as->mrm.ofs = (int32_t)(ir->op2 << 2) - GG_OFS(dispatch);
    as->mrm.base = RID_DISPATCH;
#else
    as->mrm.ofs = (int32_t)(ir->op2 << 2) + ptr2addr(J2GG(as->J));
    as->mrm.base = RID_NONE;
#endif
    return;
  }
  as->mrm.ofs = field_ofs[ir->op2];
  if (irref_isk(ir->op1)) {
    IRIns *op1 = IR(ir->op1);
#if LJ_GC64
    if (ir->op1 == REF_NIL) {
      as->mrm.ofs -= GG_OFS(dispatch);
      as->mrm.base = RID_DISPATCH;
      return;
    } else if (op1->o == IR_KPTR || op1->o == IR_KKPTR) {
      intptr_t ofs = dispofs(as, ir_kptr(op1));
      if (checki32(as->mrm.ofs + ofs)) {
        as->mrm.ofs += (int32_t)ofs;
        as->mrm.base = RID_DISPATCH;
        return;
      }
    }
#else
    as->mrm.ofs += op1->i;
    as->mrm.base = RID_NONE;
    return;
#endif
  }
  as->mrm.base = (uint8_t)ra_alloc1(as, ir->op1, allow);
}

/* Fuse string reference into memory operand. */
static void asm_fusestrref(ASMState *as, IRIns *ir, RegSet allow)
{
  IRIns *irr;
  lj_assertA(ir->o == IR_STRREF, "bad IR op %d", ir->o);
  as->mrm.base = as->mrm.idx = RID_NONE;
  as->mrm.scale = XM_SCALE1;
  as->mrm.ofs = sizeof(GCstr);
  if (!LJ_GC64 && irref_isk(ir->op1)) {
    as->mrm.ofs += IR(ir->op1)->i;
  } else {
    Reg r = ra_alloc1(as, ir->op1, allow);
    rset_clear(allow, r);
    as->mrm.base = (uint8_t)r;
  }
  irr = IR(ir->op2);
  if (irref_isk(ir->op2)) {
    as->mrm.ofs += irr->i;
  } else {
    Reg r;
    /* Fuse a constant add into the offset, e.g. string.sub(s, i+10). */
    if (!LJ_64 &&  /* Has bad effects with negative index on x64. */
        mayfuse(as, ir->op2) && irr->o == IR_ADD && irref_isk(irr->op2)) {
      as->mrm.ofs += IR(irr->op2)->i;
      r = ra_alloc1(as, irr->op1, allow);
    } else {
      r = ra_alloc1(as, ir->op2, allow);
    }
    if (as->mrm.base == RID_NONE)
      as->mrm.base = (uint8_t)r;
    else
      as->mrm.idx = (uint8_t)r;
  }
}
```
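The initial `ofs = sizeof(GCstr)` works because string bytes are stored inline, immediately after the GCstr header. A sketch of the addressing this enables (it mirrors `strdata()` in lj_str.h):

```c
/* Byte i of string s lives directly behind the GCstr header. */
static const char *strref_sketch(GCstr *s, MSize i)
{
  return (const char *)(s + 1) + i;  /* == (char *)s + sizeof(GCstr) + i */
}
```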
```c
static void asm_fusexref(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  as->mrm.idx = RID_NONE;
  if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
#if LJ_GC64
    intptr_t ofs = dispofs(as, ir_kptr(ir));
    if (checki32(ofs)) {
      as->mrm.ofs = (int32_t)ofs;
      as->mrm.base = RID_DISPATCH;
      return;
    }
  } if (0) {
#else
    as->mrm.ofs = ir->i;
    as->mrm.base = RID_NONE;
  } else if (ir->o == IR_STRREF) {
    asm_fusestrref(as, ir, allow);
#endif
  } else {
    as->mrm.ofs = 0;
    if (canfuse(as, ir) && ir->o == IR_ADD && ra_noreg(ir->r)) {
      /* Gather (base+idx*sz)+ofs as emitted by cdata ptr/array indexing. */
      IRIns *irx;
      IRRef idx;
      Reg r;
      if (asm_isk32(as, ir->op2, &as->mrm.ofs)) {  /* Recognize x+ofs. */
        ref = ir->op1;
        ir = IR(ref);
        if (!(ir->o == IR_ADD && canfuse(as, ir) && ra_noreg(ir->r)))
          goto noadd;
      }
      as->mrm.scale = XM_SCALE1;
      idx = ir->op1;
      ref = ir->op2;
      irx = IR(idx);
      if (!(irx->o == IR_BSHL || irx->o == IR_ADD)) {  /* Try other operand. */
        idx = ir->op2;
        ref = ir->op1;
        irx = IR(idx);
      }
      if (canfuse(as, irx) && ra_noreg(irx->r)) {
        if (irx->o == IR_BSHL && irref_isk(irx->op2) && IR(irx->op2)->i <= 3) {
          /* Recognize idx<<b with b = 0-3, corresponding to sz = (1),2,4,8. */
          idx = irx->op1;
          as->mrm.scale = (uint8_t)(IR(irx->op2)->i << 6);
        } else if (irx->o == IR_ADD && irx->op1 == irx->op2) {
          /* FOLD does idx*2 ==> idx<<1 ==> idx+idx. */
          idx = irx->op1;
          as->mrm.scale = XM_SCALE2;
        }
      }
      r = ra_alloc1(as, idx, allow);
      rset_clear(allow, r);
      as->mrm.idx = (uint8_t)r;
    }
  noadd:
    as->mrm.base = (uint8_t)ra_alloc1(as, ref, allow);
  }
}
```
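The `i << 6` works because the SIB scale field occupies the top two bits of the scale byte, so a shift count of 0-3 directly encodes a scale factor of 1/2/4/8. A tiny sketch of the mapping (assuming the `XM_SCALE*` constants are defined as `n << 6`, consistent with the shift above):

```c
/* Map a shift count b (0..3) to the SIB scale encoding for factor 1<<b. */
static uint8_t sib_scale_sketch(int shift_count)
{
  return (uint8_t)(shift_count << 6);  /* Bits 6-7 of the scale byte. */
}
```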
```c
/* Fuse load of 64 bit IR constant into memory operand. */
static Reg asm_fuseloadk64(ASMState *as, IRIns *ir)
{
  const uint64_t *k = &ir_k64(ir)->u64;
  if (!LJ_GC64 || checki32((intptr_t)k)) {
    as->mrm.ofs = ptr2addr(k);
    as->mrm.base = RID_NONE;
#if LJ_GC64
  } else if (checki32(dispofs(as, k))) {
    as->mrm.ofs = (int32_t)dispofs(as, k);
    as->mrm.base = RID_DISPATCH;
  } else if (checki32(mcpofs(as, k)) && checki32(mcpofs(as, k+1)) &&
             checki32(mctopofs(as, k)) && checki32(mctopofs(as, k+1))) {
    as->mrm.ofs = (int32_t)mcpofs(as, k);
    as->mrm.base = RID_RIP;
  } else {  /* Intern 64 bit constant at bottom of mcode. */
    if (ir->i) {
      lj_assertA(*k == *(uint64_t*)(as->mctop - ir->i),
                 "bad interned 64 bit constant");
    } else {
      while ((uintptr_t)as->mcbot & 7) *as->mcbot++ = XI_INT3;
      *(uint64_t*)as->mcbot = *k;
      ir->i = (int32_t)(as->mctop - as->mcbot);
      as->mcbot += 8;
      as->mclim = as->mcbot + MCLIM_REDZONE;
      lj_mcode_commitbot(as->J, as->mcbot);
    }
    as->mrm.ofs = (int32_t)mcpofs(as, as->mctop - ir->i);
    as->mrm.base = RID_RIP;
#endif
  }
  as->mrm.idx = RID_NONE;
  return RID_MRM;
}

/* Fuse load into memory operand.
**
** Important caveat: this may emit RIP-relative loads! So don't place any
** code emitters between this function and the use of its result.
** The only permitted exception is asm_guardcc().
*/
static Reg asm_fuseload(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  if (ra_hasreg(ir->r)) {
    if (allow != RSET_EMPTY) {  /* Fast path. */
      ra_noweak(as, ir->r);
      return ir->r;
    }
  fusespill:
    /* Force a spill if only memory operands are allowed (asm_x87load). */
    as->mrm.base = RID_ESP;
    as->mrm.ofs = ra_spill(as, ir);
    as->mrm.idx = RID_NONE;
    return RID_MRM;
  }
  if (ir->o == IR_KNUM) {
    RegSet avail = as->freeset & ~as->modset & RSET_FPR;
    lj_assertA(allow != RSET_EMPTY, "no register allowed");
    if (!(avail & (avail-1)))  /* Fuse if less than two regs available. */
      return asm_fuseloadk64(as, ir);
  } else if (ref == REF_BASE || ir->o == IR_KINT64) {
    RegSet avail = as->freeset & ~as->modset & RSET_GPR;
    lj_assertA(allow != RSET_EMPTY, "no register allowed");
    if (!(avail & (avail-1))) {  /* Fuse if less than two regs available. */
      if (ref == REF_BASE) {
#if LJ_GC64
        as->mrm.ofs = (int32_t)dispofs(as, &J2G(as->J)->jit_base);
        as->mrm.base = RID_DISPATCH;
#else
        as->mrm.ofs = ptr2addr(&J2G(as->J)->jit_base);
        as->mrm.base = RID_NONE;
#endif
        as->mrm.idx = RID_NONE;
        return RID_MRM;
      } else {
        return asm_fuseloadk64(as, ir);
      }
    }
  } else if (mayfuse(as, ref)) {
    RegSet xallow = (allow & RSET_GPR) ? allow : RSET_GPR;
    if (ir->o == IR_SLOAD) {
      if (!(ir->op2 & (IRSLOAD_PARENT|IRSLOAD_CONVERT)) &&
          noconflict(as, ref, IR_RETF, 0) &&
          !(LJ_GC64 && irt_isaddr(ir->t))) {
        as->mrm.base = (uint8_t)ra_alloc1(as, REF_BASE, xallow);
        as->mrm.ofs = 8*((int32_t)ir->op1-1-LJ_FR2) +
                      (!LJ_FR2 && (ir->op2 & IRSLOAD_FRAME) ? 4 : 0);
        as->mrm.idx = RID_NONE;
        return RID_MRM;
      }
    } else if (ir->o == IR_FLOAD) {
      /* Generic fusion is only ok for 32 bit operand (but see asm_comp). */
      if ((irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t)) &&
          noconflict(as, ref, IR_FSTORE, 0)) {
        asm_fusefref(as, ir, xallow);
        return RID_MRM;
      }
    } else if (ir->o == IR_ALOAD || ir->o == IR_HLOAD || ir->o == IR_ULOAD) {
      if (noconflict(as, ref, ir->o + IRDELTA_L2S, 0) &&
          !(LJ_GC64 && irt_isaddr(ir->t))) {
        asm_fuseahuref(as, ir->op1, xallow);
        return RID_MRM;
      }
    } else if (ir->o == IR_XLOAD) {
      /* Generic fusion is not ok for 8/16 bit operands (but see asm_comp).
      ** Fusing unaligned memory operands is ok on x86 (except for SIMD types).
      */
      if ((!irt_typerange(ir->t, IRT_I8, IRT_U16)) &&
          noconflict(as, ref, IR_XSTORE, 0)) {
        asm_fusexref(as, ir->op1, xallow);
        return RID_MRM;
      }
    } else if (ir->o == IR_VLOAD && IR(ir->op1)->o == IR_AREF &&
               !(LJ_GC64 && irt_isaddr(ir->t))) {
      asm_fuseahuref(as, ir->op1, xallow);
      as->mrm.ofs += 8 * ir->op2;
      return RID_MRM;
    }
  }
  if (ir->o == IR_FLOAD && ir->op1 == REF_NIL) {
    asm_fusefref(as, ir, RSET_EMPTY);
    return RID_MRM;
  }
  if (!(as->freeset & allow) && !emit_canremat(ref) &&
      (allow == RSET_EMPTY || ra_hasspill(ir->s) || iscrossref(as, ref)))
    goto fusespill;
  return ra_allocref(as, ref, allow);
}

#if LJ_64
/* Don't fuse a 32 bit load into a 64 bit operation. */
static Reg asm_fuseloadm(ASMState *as, IRRef ref, RegSet allow, int is64)
{
  if (is64 && !irt_is64(IR(ref)->t))
    return ra_alloc1(as, ref, allow);
  return asm_fuseload(as, ref, allow);
}
#else
#define asm_fuseloadm(as, ref, allow, is64)  asm_fuseload(as, (ref), (allow))
#endif
```
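The `avail & (avail-1)` test is the classic clear-lowest-set-bit trick: it is zero exactly when the register set has at most one member, which is when fusing a memory operand pays off instead of burning one of the last free registers. A standalone demonstration:

```c
/* rset & (rset-1) clears the lowest set bit, so the result is zero iff
** the set has fewer than two members.
*/
#include <assert.h>
#include <stdint.h>

static int has_less_than_two(uint32_t rset)
{
  return !(rset & (rset-1));
}

int main(void)
{
  assert(has_less_than_two(0x00));   /* No regs free. */
  assert(has_less_than_two(0x10));   /* Exactly one reg free. */
  assert(!has_less_than_two(0x12));  /* Two or more regs free. */
  return 0;
}
```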
```c
/* -- Calls --------------------------------------------------------------- */

/* Count the required number of stack slots for a call. */
static int asm_count_call_slots(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t i, nargs = CCI_XNARGS(ci);
  int nslots = 0;
#if LJ_64
  if (LJ_ABI_WIN) {
    nslots = (int)(nargs*2);  /* Only matters for more than four args. */
  } else {
    int ngpr = REGARG_NUMGPR, nfpr = REGARG_NUMFPR;
    for (i = 0; i < nargs; i++)
      if (args[i] && irt_isfp(IR(args[i])->t)) {
        if (nfpr > 0) nfpr--; else nslots += 2;
      } else {
        if (ngpr > 0) ngpr--; else nslots += 2;
      }
  }
#else
  int ngpr = 0;
  if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
    ngpr = 2;
  else if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
    ngpr = 1;
  for (i = 0; i < nargs; i++)
    if (args[i] && irt_isfp(IR(args[i])->t)) {
      nslots += irt_isnum(IR(args[i])->t) ? 2 : 1;
    } else {
      if (ngpr > 0) ngpr--; else nslots++;
    }
#endif
  return nslots;
}
```
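On POSIX/x64 only arguments that overflow the register sets consume stack space, two 4-byte slots each; Windows/x64 conservatively reserves two slots per argument. A worked example under the assumption that `REGARG_NUMGPR` is 6 and `REGARG_NUMFPR` is 8, as in the System V x86-64 ABI:

```c
/* Worked example for a POSIX/x64 call with 7 integer args and 2 FP args
** (assumption: REGARG_NUMGPR == 6, REGARG_NUMFPR == 8 per the SysV ABI):
**
**   7 GPR args -> 6 go in rdi,rsi,rdx,rcx,r8,r9; 1 spills -> nslots += 2
**   2 FPR args -> both fit in xmm0-xmm7                   -> nslots += 0
**
** Result: nslots == 2, i.e. one 8-byte stack argument.
*/
```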
```c
/* Generate a call to a C function. */
static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args)
{
  uint32_t n, nargs = CCI_XNARGS(ci);
  int32_t ofs = STACKARG_OFS;
#if LJ_64
  uint32_t gprs = REGARG_GPRS;
  Reg fpr = REGARG_FIRSTFPR;
#if !LJ_ABI_WIN
  MCode *patchnfpr = NULL;
#endif
#else
  uint32_t gprs = 0;
  if ((ci->flags & CCI_CC_MASK) != CCI_CC_CDECL) {
    if ((ci->flags & CCI_CC_MASK) == CCI_CC_THISCALL)
      gprs = (REGARG_GPRS & 31);
    else if ((ci->flags & CCI_CC_MASK) == CCI_CC_FASTCALL)
      gprs = REGARG_GPRS;
  }
#endif
  if ((void *)ci->func)
    emit_call(as, ci->func);
#if LJ_64
  if ((ci->flags & CCI_VARARG)) {  /* Special handling for vararg calls. */
#if LJ_ABI_WIN
    for (n = 0; n < 4 && n < nargs; n++) {
      IRIns *ir = IR(args[n]);
      if (irt_isfp(ir->t))  /* Duplicate FPRs in GPRs. */
        emit_rr(as, XO_MOVDto, (irt_isnum(ir->t) ? REX_64 : 0) | (fpr+n),
                ((gprs >> (n*5)) & 31));  /* Either MOVD or MOVQ. */
    }
#else
    patchnfpr = --as->mcp;  /* Indicate number of used FPRs in register al. */
    *--as->mcp = XI_MOVrib | RID_EAX;
#endif
  }
#endif
  for (n = 0; n < nargs; n++) {  /* Setup args. */
    IRRef ref = args[n];
    IRIns *ir = IR(ref);
    Reg r;
#if LJ_64 && LJ_ABI_WIN
    /* Windows/x64 argument registers are strictly positional. */
    r = irt_isfp(ir->t) ? (fpr <= REGARG_LASTFPR ? fpr : 0) : (gprs & 31);
    fpr++; gprs >>= 5;
#elif LJ_64
    /* POSIX/x64 argument registers are used in order of appearance. */
    if (irt_isfp(ir->t)) {
      r = fpr <= REGARG_LASTFPR ? fpr++ : 0;
    } else {
      r = gprs & 31; gprs >>= 5;
    }
#else
    if (ref && irt_isfp(ir->t)) {
      r = 0;
    } else {
      r = gprs & 31; gprs >>= 5;
      if (!ref) continue;
    }
#endif
    if (r) {  /* Argument is in a register. */
      if (r < RID_MAX_GPR && ref < ASMREF_TMP1) {
#if LJ_64
        if (LJ_GC64 ? !(ir->o == IR_KINT || ir->o == IR_KNULL) : ir->o == IR_KINT64)
          emit_loadu64(as, r, ir_k64(ir)->u64);
        else
#endif
          emit_loadi(as, r, ir->i);
      } else {
        /* Must have been evicted. */
        lj_assertA(rset_test(as->freeset, r), "reg %d not free", r);
        if (ra_hasreg(ir->r)) {
          ra_noweak(as, ir->r);
          emit_movrr(as, ir, r, ir->r);
        } else {
          ra_allocref(as, ref, RID2RSET(r));
        }
      }
    } else if (irt_isfp(ir->t)) {  /* FP argument is on stack. */
      lj_assertA(!(irt_isfloat(ir->t) && irref_isk(ref)),
                 "unexpected float constant");
      if (LJ_32 && (ofs & 4) && irref_isk(ref)) {
        /* Split stores for unaligned FP consts. */
        emit_movmroi(as, RID_ESP, ofs, (int32_t)ir_knum(ir)->u32.lo);
        emit_movmroi(as, RID_ESP, ofs+4, (int32_t)ir_knum(ir)->u32.hi);
      } else {
        r = ra_alloc1(as, ref, RSET_FPR);
        emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSDto : XO_MOVSSto,
                  r, RID_ESP, ofs);
      }
      ofs += (LJ_32 && irt_isfloat(ir->t)) ? 4 : 8;
    } else {  /* Non-FP argument is on stack. */
      if (LJ_32 && ref < ASMREF_TMP1) {
        emit_movmroi(as, RID_ESP, ofs, ir->i);
      } else {
        r = ra_alloc1(as, ref, RSET_GPR);
        emit_movtomro(as, REX_64 + r, RID_ESP, ofs);
      }
      ofs += sizeof(intptr_t);
    }
    checkmclim(as);
  }
#if LJ_64 && !LJ_ABI_WIN
  if (patchnfpr) *patchnfpr = fpr - REGARG_FIRSTFPR;
#endif
}
```
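The `patchnfpr` dance implements the System V x86-64 vararg rule that AL must carry the number of vector registers used by the call. Since this assembler emits machine code backwards, the `mov al, imm8` is emitted first with a placeholder immediate, and the byte is patched once all FP arguments have been assigned. A hypothetical caller-side illustration of what the patched instruction amounts to (not taken from the source):

```c
/* SysV x86-64 vararg convention, e.g. for printf("%f %f", x, y):
**
**   movsd xmm0, [x]
**   movsd xmm1, [y]
**   mov   al, 2        ; two XMM registers carry arguments
**   call  printf
*/
```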
```c
/* Setup result reg/sp for call. Evict scratch regs. */
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  RegSet drop = RSET_SCRATCH;
  int hiop = ((ir+1)->o == IR_HIOP && !irt_isnil((ir+1)->t));
  if ((ci->flags & CCI_NOFPRCLOBBER))
    drop &= ~RSET_FPR;
  if (ra_hasreg(ir->r))
    rset_clear(drop, ir->r);  /* Dest reg handled below. */
  if (hiop && ra_hasreg((ir+1)->r))
    rset_clear(drop, (ir+1)->r);  /* Dest reg handled below. */
  ra_evictset(as, drop);  /* Evictions must be performed first. */
  if (ra_used(ir)) {
    if (irt_isfp(ir->t)) {
      int32_t ofs = sps_scale(ir->s);  /* Use spill slot or temp slots. */
#if LJ_64
      if ((ci->flags & CCI_CASTU64)) {
        Reg dest = ir->r;
        if (ra_hasreg(dest)) {
          ra_free(as, dest);
          ra_modified(as, dest);
          emit_rr(as, XO_MOVD, dest|REX_64, RID_RET);  /* Really MOVQ. */
        }
        if (ofs) emit_movtomro(as, RID_RET|REX_64, RID_ESP, ofs);
      } else {
        ra_destreg(as, ir, RID_FPRET);
      }
#else
      /* Number result is in x87 st0 for x86 calling convention. */
      Reg dest = ir->r;
      if (ra_hasreg(dest)) {
        ra_free(as, dest);
        ra_modified(as, dest);
        emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSD : XO_MOVSS,
                  dest, RID_ESP, ofs);
      }
      if ((ci->flags & CCI_CASTU64)) {
        emit_movtomro(as, RID_RETLO, RID_ESP, ofs);
        emit_movtomro(as, RID_RETHI, RID_ESP, ofs+4);
      } else {
        emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
                  irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
      }
#endif
    } else if (hiop) {
      ra_destpair(as, ir);
    } else {
      lj_assertA(!irt_ispri(ir->t), "PRI dest");
      ra_destreg(as, ir, RID_RET);
    }
  } else if (LJ_32 && irt_isfp(ir->t) && !(ci->flags & CCI_CASTU64)) {
    emit_x87op(as, XI_FPOP);  /* Pop unused result from x87 st0. */
  }
}

/* Return a constant function pointer or NULL for indirect calls. */
static void *asm_callx_func(ASMState *as, IRIns *irf, IRRef func)
{
#if LJ_32
  UNUSED(as);
  if (irref_isk(func))
    return (void *)irf->i;
#else
  if (irref_isk(func)) {
    MCode *p;
    if (irf->o == IR_KINT64)
      p = (MCode *)(void *)ir_k64(irf)->u64;
    else
      p = (MCode *)(void *)(uintptr_t)(uint32_t)irf->i;
    if (p - as->mcp == (int32_t)(p - as->mcp))
      return p;  /* Call target is still in +-2GB range. */
    /* Avoid the indirect case of emit_call(). Try to hoist func addr. */
  }
#endif
  return NULL;
}
```
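A direct x86-64 `CALL` uses a signed 32 bit displacement, so the target must lie within ±2GB of the call site; the truncation round-trip above is the standard test for that. A standalone sketch of the same check:

```c
/* A rel32 call displacement must fit in a signed 32 bit value; the
** round-trip comparison is checki32() applied to the distance.
*/
#include <stdint.h>

static int in_rel32_range(const char *target, const char *callsite)
{
  intptr_t delta = target - callsite;
  return delta == (int32_t)delta;  /* Within +-2GB? */
}
```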
```c
static void asm_callx(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX*2];
  CCallInfo ci;
  IRRef func;
  IRIns *irf;
  int32_t spadj = 0;
  ci.flags = asm_callx_flags(as, ir);
  asm_collectargs(as, ir, &ci, args);
  asm_setupresult(as, ir, &ci);
#if LJ_32
  /* Have to readjust stack after non-cdecl calls due to callee cleanup. */
  if ((ci.flags & CCI_CC_MASK) != CCI_CC_CDECL)
    spadj = 4 * asm_count_call_slots(as, &ci, args);
#endif
  func = ir->op2; irf = IR(func);
  if (irf->o == IR_CARG) { func = irf->op1; irf = IR(func); }
  ci.func = (ASMFunction)asm_callx_func(as, irf, func);
  if (!(void *)ci.func) {
    /* Use a (hoistable) non-scratch register for indirect calls. */
    RegSet allow = (RSET_GPR & ~RSET_SCRATCH);
    Reg r = ra_alloc1(as, func, allow);
    if (LJ_32) emit_spsub(as, spadj);  /* Above code may cause restores! */
    emit_rr(as, XO_GROUP5, XOg_CALL, r);
  } else if (LJ_32) {
    emit_spsub(as, spadj);
  }
  asm_gencall(as, &ci, args);
}

/* -- Returns ------------------------------------------------------------- */

/* Return to lower frame. Guard that it goes to the right spot. */
static void asm_retf(ASMState *as, IRIns *ir)
{
  Reg base = ra_alloc1(as, REF_BASE, RSET_GPR);
#if LJ_FR2
  Reg rpc = ra_scratch(as, rset_exclude(RSET_GPR, base));
#endif
  void *pc = ir_kptr(IR(ir->op2));
  int32_t delta = 1+LJ_FR2+bc_a(*((const BCIns *)pc - 1));
  as->topslot -= (BCReg)delta;
  if ((int32_t)as->topslot < 0) as->topslot = 0;
  irt_setmark(IR(REF_BASE)->t);  /* Children must not coalesce with BASE reg. */
  emit_setgl(as, base, jit_base);
  emit_addptr(as, base, -8*delta);
  asm_guardcc(as, CC_NE);
#if LJ_FR2
  emit_rmro(as, XO_CMP, rpc|REX_GC64, base, -8);
  emit_loadu64(as, rpc, u64ptr(pc));
#else
  emit_gmroi(as, XG_ARITHi(XOg_CMP), base, -4, ptr2addr(pc));
#endif
}

/* -- Buffer operations --------------------------------------------------- */

#if LJ_HASBUFFER
static void asm_bufhdr_write(ASMState *as, Reg sb)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
  IRIns irgc;
  irgc.ot = IRT(0, IRT_PGC);  /* GC type. */
  emit_storeofs(as, &irgc, tmp, sb, offsetof(SBuf, L));
  emit_opgl(as, XO_ARITH(XOg_OR), tmp|REX_GC64, cur_L);
  emit_gri(as, XG_ARITHi(XOg_AND), tmp, SBUF_MASK_FLAG);
  emit_loadofs(as, &irgc, tmp, sb, offsetof(SBuf, L));
}
#endif

/* -- Type conversions ---------------------------------------------------- */

static void asm_tointg(ASMState *as, IRIns *ir, Reg left)
{
  Reg tmp = ra_scratch(as, rset_exclude(RSET_FPR, left));
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_guardcc(as, CC_P);
  asm_guardcc(as, CC_NE);
  emit_rr(as, XO_UCOMISD, left, tmp);
  emit_rr(as, XO_CVTSI2SD, tmp, dest);
  emit_rr(as, XO_XORPS, tmp, tmp);  /* Avoid partial register stall. */
  emit_rr(as, XO_CVTTSD2SI, dest, left);
  /* Can't fuse since left is needed twice. */
}

static void asm_tobit(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  Reg tmp = ra_noreg(IR(ir->op1)->r) ?
              ra_alloc1(as, ir->op1, RSET_FPR) :
              ra_scratch(as, RSET_FPR);
  Reg right;
  emit_rr(as, XO_MOVDto, tmp, dest);
  right = asm_fuseload(as, ir->op2, rset_exclude(RSET_FPR, tmp));
  emit_mrm(as, XO_ADDSD, tmp, right);
  ra_left(as, tmp, ir->op1);
}
```
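TOBIT adds the magic bias 2^52+2^51 to the double: after the addition, the operand's integer value sits in the low 32 bits of the mantissa, so a plain MOVD extracts the result. A self-contained C sketch of the trick (assumptions: IEEE-754 doubles, round-to-nearest, little-endian layout):

```c
/* The 2^52+2^51 bias forces the value into a fixed exponent, so the
** integer lands in the low mantissa bits (this is what the MOVD reads).
*/
#include <stdint.h>
#include <string.h>

static int32_t tobit_sketch(double n)
{
  double biased = n + 6755399441055744.0;  /* 2^52 + 2^51. */
  int32_t lo;
  memcpy(&lo, &biased, 4);  /* Low 32 bits of the double. */
  return lo;
}
```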
```c
static void asm_conv(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)(ir->op2 & IRCONV_SRCMASK);
  int st64 = (st == IRT_I64 || st == IRT_U64 || (LJ_64 && st == IRT_P64));
  int stfp = (st == IRT_NUM || st == IRT_FLOAT);
  IRRef lref = ir->op1;
  lj_assertA(irt_type(ir->t) != st, "inconsistent types for CONV");
  lj_assertA(!(LJ_32 && (irt_isint64(ir->t) || st64)),
             "IR %04d has unsplit 64 bit type",
             (int)(ir - as->ir) - REF_BIAS);
  if (irt_isfp(ir->t)) {
    Reg dest = ra_dest(as, ir, RSET_FPR);
    if (stfp) {  /* FP to FP conversion. */
      Reg left = asm_fuseload(as, lref, RSET_FPR);
      emit_mrm(as, st == IRT_NUM ? XO_CVTSD2SS : XO_CVTSS2SD, dest, left);
      if (left == dest) return;  /* Avoid the XO_XORPS. */
    } else if (LJ_32 && st == IRT_U32) {  /* U32 to FP conversion on x86. */
      /* number = (2^52+2^51 .. u32) - (2^52+2^51) */
      cTValue *k = &as->J->k64[LJ_K64_TOBIT];
      Reg bias = ra_scratch(as, rset_exclude(RSET_FPR, dest));
      if (irt_isfloat(ir->t))
        emit_rr(as, XO_CVTSD2SS, dest, dest);
      emit_rr(as, XO_SUBSD, dest, bias);  /* Subtract 2^52+2^51 bias. */
      emit_rr(as, XO_XORPS, dest, bias);  /* Merge bias and integer. */
      emit_rma(as, XO_MOVSD, bias, k);
      emit_mrm(as, XO_MOVD, dest, asm_fuseload(as, lref, RSET_GPR));
      return;
    } else {  /* Integer to FP conversion. */
      Reg left = (LJ_64 && (st == IRT_U32 || st == IRT_U64)) ?
                 ra_alloc1(as, lref, RSET_GPR) :
                 asm_fuseloadm(as, lref, RSET_GPR, st64);
      if (LJ_64 && st == IRT_U64) {
        MCLabel l_end = emit_label(as);
        cTValue *k = &as->J->k64[LJ_K64_2P64];
        emit_rma(as, XO_ADDSD, dest, k);  /* Add 2^64 to compensate. */
        emit_sjcc(as, CC_NS, l_end);
        emit_rr(as, XO_TEST, left|REX_64, left);  /* Check if u64 >= 2^63. */
      }
      emit_mrm(as, irt_isnum(ir->t) ? XO_CVTSI2SD : XO_CVTSI2SS,
               dest|((LJ_64 && (st64 || st == IRT_U32)) ? REX_64 : 0), left);
    }
    emit_rr(as, XO_XORPS, dest, dest);  /* Avoid partial register stall. */
  } else if (stfp) {  /* FP to integer conversion. */
    if (irt_isguard(ir->t)) {
      /* Checked conversions are only supported from number to int. */
      lj_assertA(irt_isint(ir->t) && st == IRT_NUM,
                 "bad type for checked CONV");
      asm_tointg(as, ir, ra_alloc1(as, lref, RSET_FPR));
    } else {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      x86Op op = st == IRT_NUM ? XO_CVTTSD2SI : XO_CVTTSS2SI;
      if (LJ_64 ? irt_isu64(ir->t) : irt_isu32(ir->t)) {
        /* LJ_64: For inputs >= 2^63 add -2^64, convert again. */
        /* LJ_32: For inputs >= 2^31 add -2^31, convert again and add 2^31. */
        Reg tmp = ra_noreg(IR(lref)->r) ? ra_alloc1(as, lref, RSET_FPR) :
                                          ra_scratch(as, RSET_FPR);
        MCLabel l_end = emit_label(as);
        if (LJ_32)
          emit_gri(as, XG_ARITHi(XOg_ADD), dest, (int32_t)0x80000000);
        emit_rr(as, op, dest|REX_64, tmp);
        if (st == IRT_NUM)
          emit_rma(as, XO_ADDSD, tmp, &as->J->k64[LJ_K64_M2P64_31]);
        else
          emit_rma(as, XO_ADDSS, tmp, &as->J->k32[LJ_K32_M2P64_31]);
        emit_sjcc(as, CC_NS, l_end);
        emit_rr(as, XO_TEST, dest|REX_64, dest);  /* Check if dest negative. */
        emit_rr(as, op, dest|REX_64, tmp);
        ra_left(as, tmp, lref);
      } else {
        if (LJ_64 && irt_isu32(ir->t))
          emit_rr(as, XO_MOV, dest, dest);  /* Zero hiword. */
        emit_mrm(as, op,
                 dest|((LJ_64 &&
                        (irt_is64(ir->t) || irt_isu32(ir->t))) ? REX_64 : 0),
                 asm_fuseload(as, lref, RSET_FPR));
      }
    }
  } else if (st >= IRT_I8 && st <= IRT_U16) {  /* Extend to 32 bit integer. */
    Reg left, dest = ra_dest(as, ir, RSET_GPR);
    RegSet allow = RSET_GPR;
    x86Op op;
    lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t), "bad type for CONV EXT");
    if (st == IRT_I8) {
      op = XO_MOVSXb; allow = RSET_GPR8; dest |= FORCE_REX;
    } else if (st == IRT_U8) {
      op = XO_MOVZXb; allow = RSET_GPR8; dest |= FORCE_REX;
    } else if (st == IRT_I16) {
      op = XO_MOVSXw;
    } else {
      op = XO_MOVZXw;
    }
    left = asm_fuseload(as, lref, allow);
    /* Add extra MOV if source is already in wrong register. */
    if (!LJ_64 && left != RID_MRM && !rset_test(allow, left)) {
      Reg tmp = ra_scratch(as, allow);
      emit_rr(as, op, dest, tmp);
      emit_rr(as, XO_MOV, tmp, left);
    } else {
      emit_mrm(as, op, dest, left);
    }
  } else {  /* 32/64 bit integer conversions. */
    if (LJ_32) {  /* Only need to handle 32/32 bit no-op (cast) on x86. */
      Reg dest = ra_dest(as, ir, RSET_GPR);
      ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
    } else if (irt_is64(ir->t)) {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      if (st64 || !(ir->op2 & IRCONV_SEXT)) {
        /* 64/64 bit no-op (cast) or 32 to 64 bit zero extension. */
        ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
      } else {  /* 32 to 64 bit sign extension. */
        Reg left = asm_fuseload(as, lref, RSET_GPR);
        emit_mrm(as, XO_MOVSXd, dest|REX_64, left);
      }
    } else {
      Reg dest = ra_dest(as, ir, RSET_GPR);
      if (st64 && !(ir->op2 & IRCONV_NONE)) {
        Reg left = asm_fuseload(as, lref, RSET_GPR);
        /* This is either a 32 bit reg/reg mov which zeroes the hiword
        ** or a load of the loword from a 64 bit address.
        */
        emit_mrm(as, XO_MOV, dest, left);
      } else {  /* 32/32 bit no-op (cast). */
        ra_left(as, dest, lref);  /* Do nothing, but may need to move regs. */
      }
    }
  }
}
```
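The U64-to-FP path above relies on CVTSI2SD treating its source as signed: inputs at or above 2^63 convert 2^64 too low and are compensated afterwards. A plain-C mirror of that strategy (a sketch under the assumption of two's-complement casts, not the emitted code itself):

```c
/* u64 -> double via signed convert plus compensation, mirroring the
** TEST/JNS/ADDSD sequence emitted for LJ_64 && st == IRT_U64.
*/
#include <stdint.h>

static double u64_to_double_sketch(uint64_t u)
{
  double d = (double)(int64_t)u;  /* Signed convert (cvtsi2sd). */
  if ((int64_t)u < 0)             /* Was u >= 2^63? */
    d += 18446744073709551616.0;  /* Add 2^64 to compensate. */
  return d;
}
```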
```c
#if LJ_32 && LJ_HASFFI
/* No SSE conversions to/from 64 bit on x86, so resort to ugly x87 code. */

/* 64 bit integer to FP conversion in 32 bit mode. */
static void asm_conv_fp_int64(ASMState *as, IRIns *ir)
{
  Reg hi = ra_alloc1(as, ir->op1, RSET_GPR);
  Reg lo = ra_alloc1(as, (ir-1)->op1, rset_exclude(RSET_GPR, hi));
  int32_t ofs = sps_scale(ir->s);  /* Use spill slot or temp slots. */
  Reg dest = ir->r;
  if (ra_hasreg(dest)) {
    ra_free(as, dest);
    ra_modified(as, dest);
    emit_rmro(as, irt_isnum(ir->t) ? XO_MOVSD : XO_MOVSS, dest, RID_ESP, ofs);
  }
  emit_rmro(as, irt_isnum(ir->t) ? XO_FSTPq : XO_FSTPd,
            irt_isnum(ir->t) ? XOg_FSTPq : XOg_FSTPd, RID_ESP, ofs);
  if (((ir-1)->op2 & IRCONV_SRCMASK) == IRT_U64) {
    /* For inputs in [2^63,2^64-1] add 2^64 to compensate. */
    MCLabel l_end = emit_label(as);
    emit_rma(as, XO_FADDq, XOg_FADDq, &as->J->k64[LJ_K64_2P64]);
    emit_sjcc(as, CC_NS, l_end);
    emit_rr(as, XO_TEST, hi, hi);  /* Check if u64 >= 2^63. */
  } else {
    lj_assertA(((ir-1)->op2 & IRCONV_SRCMASK) == IRT_I64, "bad type for CONV");
  }
  emit_rmro(as, XO_FILDq, XOg_FILDq, RID_ESP, 0);
  /* NYI: Avoid narrow-to-wide store-to-load forwarding stall. */
  emit_rmro(as, XO_MOVto, hi, RID_ESP, 4);
  emit_rmro(as, XO_MOVto, lo, RID_ESP, 0);
}

/* FP to 64 bit integer conversion in 32 bit mode. */
static void asm_conv_int64_fp(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
  IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
  Reg lo, hi;
  lj_assertA(st == IRT_NUM || st == IRT_FLOAT, "bad type for CONV");
  lj_assertA(dt == IRT_I64 || dt == IRT_U64, "bad type for CONV");
  hi = ra_dest(as, ir, RSET_GPR);
  lo = ra_dest(as, ir-1, rset_exclude(RSET_GPR, hi));
  if (ra_used(ir-1)) emit_rmro(as, XO_MOV, lo, RID_ESP, 0);
  /* NYI: Avoid wide-to-narrow store-to-load forwarding stall. */
  if (!(as->flags & JIT_F_SSE3)) {  /* Set FPU rounding mode to default. */
    emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 4);
    emit_rmro(as, XO_MOVto, lo, RID_ESP, 4);
    emit_gri(as, XG_ARITHi(XOg_AND), lo, 0xf3ff);
  }
  if (dt == IRT_U64) {
    /* For inputs in [2^63,2^64-1] add -2^64 and convert again. */
    MCLabel l_pop, l_end = emit_label(as);
    emit_x87op(as, XI_FPOP);
    l_pop = emit_label(as);
    emit_sjmp(as, l_end);
    emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
    if ((as->flags & JIT_F_SSE3))
      emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
    else
      emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
    emit_rma(as, XO_FADDq, XOg_FADDq, &as->J->k64[LJ_K64_M2P64]);
    emit_sjcc(as, CC_NS, l_pop);
    emit_rr(as, XO_TEST, hi, hi);  /* Check if out-of-range (2^63). */
  }
  emit_rmro(as, XO_MOV, hi, RID_ESP, 4);
  if ((as->flags & JIT_F_SSE3)) {  /* Truncation is easy with SSE3. */
    emit_rmro(as, XO_FISTTPq, XOg_FISTTPq, RID_ESP, 0);
  } else {  /* Otherwise set FPU rounding mode to truncate before the store. */
    emit_rmro(as, XO_FISTPq, XOg_FISTPq, RID_ESP, 0);
    emit_rmro(as, XO_FLDCW, XOg_FLDCW, RID_ESP, 0);
    emit_rmro(as, XO_MOVtow, lo, RID_ESP, 0);
    emit_rmro(as, XO_ARITHw(XOg_OR), lo, RID_ESP, 0);
    emit_loadi(as, lo, 0xc00);
    emit_rmro(as, XO_FNSTCW, XOg_FNSTCW, RID_ESP, 0);
  }
  if (dt == IRT_U64)
    emit_x87op(as, XI_FDUP);
  emit_mrm(as, st == IRT_NUM ? XO_FLDq : XO_FLDd,
           st == IRT_NUM ? XOg_FLDq : XOg_FLDd,
           asm_fuseload(as, ir->op1, RSET_EMPTY));
}

static void asm_conv64(ASMState *as, IRIns *ir)
{
  if (irt_isfp(ir->t))
    asm_conv_fp_int64(as, ir);
  else
    asm_conv_int64_fp(as, ir);
}
#endif

static void asm_strto(ASMState *as, IRIns *ir)
{
  /* Force a spill slot for the destination register (if any). */
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_strscan_num];
  IRRef args[2];
  RegSet drop = RSET_SCRATCH;
  if ((drop & RSET_FPR) != RSET_FPR && ra_hasreg(ir->r))
    rset_set(drop, ir->r);  /* WIN64 doesn't spill all FPRs. */
  ra_evictset(as, drop);
  asm_guardcc(as, CC_E);
  emit_rr(as, XO_TEST, RID_RET, RID_RET);  /* Test return status. */
  args[0] = ir->op1;      /* GCstr *str */
  args[1] = ASMREF_TMP1;  /* TValue *n  */
  asm_gencall(as, ci, args);
  /* Store the result to the spill slot or temp slots. */
  emit_rmro(as, XO_LEA, ra_releasetmp(as, ASMREF_TMP1)|REX_64,
            RID_ESP, sps_scale(ir->s));
}

/* -- Memory references --------------------------------------------------- */

/* Get pointer to TValue. */
static void asm_tvptr(ASMState *as, Reg dest, IRRef ref, MSize mode)
{
  if ((mode & IRTMPREF_IN1)) {
    IRIns *ir = IR(ref);
    if (irt_isnum(ir->t)) {
      if (irref_isk(ref) && !(mode & IRTMPREF_OUT1)) {
        /* Use the number constant itself as a TValue. */
        emit_loada(as, dest, ir_knum(ir));
        return;
      }
      emit_rmro(as, XO_MOVSDto, ra_alloc1(as, ref, RSET_FPR), dest, 0);
    } else {
#if LJ_GC64
      if (irref_isk(ref)) {
        TValue k;
        lj_ir_kvalue(as->J->L, &k, ir);
        emit_movmroi(as, dest, 4, k.u32.hi);
        emit_movmroi(as, dest, 0, k.u32.lo);
      } else {
        /* TODO: 64 bit store + 32 bit load-modify-store is suboptimal. */
        Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, dest));
        if (irt_is64(ir->t)) {
          emit_u32(as, irt_toitype(ir->t) << 15);
          emit_rmro(as, XO_ARITHi, XOg_OR, dest, 4);
        } else {
          emit_movmroi(as, dest, 4, (irt_toitype(ir->t) << 15));
        }
        emit_movtomro(as, REX_64IR(ir, src), dest, 0);
      }
#else
      if (!irref_isk(ref)) {
        Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, dest));
        emit_movtomro(as, REX_64IR(ir, src), dest, 0);
      } else if (!irt_ispri(ir->t)) {
        emit_movmroi(as, dest, 0, ir->i);
      }
      if (!(LJ_64 && irt_islightud(ir->t)))
        emit_movmroi(as, dest, 4, irt_toitype(ir->t));
#endif
    }
  }
  emit_loada(as, dest, &J2G(as->J)->tmptv);  /* g->tmptv holds the TValue(s). */
}

static void asm_aref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusearef(as, ir, RSET_GPR);
  if (!(as->mrm.idx == RID_NONE && as->mrm.ofs == 0))
    emit_mrm(as, XO_LEA, dest|REX_GC64, RID_MRM);
  else if (as->mrm.base != dest)
    emit_rr(as, XO_MOV, dest|REX_GC64, as->mrm.base);
}

/* Inlined hash lookup. Specialized for key type and for const keys.
** The equivalent C code is:
**   Node *n = hashkey(t, key);
**   do {
**     if (lj_obj_equal(&n->key, key)) return &n->val;
**   } while ((n = nextnode(n)));
**   return niltv(L);
*/
static void asm_href(ASMState *as, IRIns *ir, IROp merge)
{
  RegSet allow = RSET_GPR;
  int destused = ra_used(ir);
  Reg dest = ra_dest(as, ir, allow);
  Reg tab = ra_alloc1(as, ir->op1, rset_clear(allow, dest));
  Reg key = RID_NONE, tmp = RID_NONE;
  IRIns *irkey = IR(ir->op2);
  int isk = irref_isk(ir->op2);
  IRType1 kt = irkey->t;
  uint32_t khash;
  MCLabel l_end, l_loop, l_next;

  if (!isk) {
    rset_clear(allow, tab);
    key = ra_alloc1(as, ir->op2, irt_isnum(kt) ? RSET_FPR : allow);
    if (LJ_GC64 || !irt_isstr(kt))
      tmp = ra_scratch(as, rset_exclude(allow, key));
  }

  /* Key not found in chain: jump to exit (if merged) or load niltv. */
  l_end = emit_label(as);
  if (merge == IR_NE)
    asm_guardcc(as, CC_E);  /* XI_JMP is not found by lj_asm_patchexit. */
  else if (destused)
    emit_loada(as, dest, niltvg(J2G(as->J)));

  /* Follow hash chain until the end. */
  l_loop = emit_sjcc_label(as, CC_NZ);
  emit_rr(as, XO_TEST, dest|REX_GC64, dest);
  emit_rmro(as, XO_MOV, dest|REX_GC64, dest, offsetof(Node, next));
  l_next = emit_label(as);

  /* Type and value comparison. */
  if (merge == IR_EQ)
    asm_guardcc(as, CC_E);
  else
    emit_sjcc(as, CC_E, l_end);
  if (irt_isnum(kt)) {
    if (isk) {
      /* Assumes -0.0 is already canonicalized to +0.0. */
      emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo),
                 (int32_t)ir_knum(irkey)->u32.lo);
      emit_sjcc(as, CC_NE, l_next);
      emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi),
                 (int32_t)ir_knum(irkey)->u32.hi);
    } else {
      emit_sjcc(as, CC_P, l_next);
      emit_rmro(as, XO_UCOMISD, key, dest, offsetof(Node, key.n));
      emit_sjcc(as, CC_AE, l_next);
      /* The type check avoids NaN penalties and complaints from Valgrind. */
#if LJ_64 && !LJ_GC64
      emit_u32(as, LJ_TISNUM);
      emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it));
#else
      emit_i8(as, LJ_TISNUM);
      emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
#endif
    }
#if LJ_64 && !LJ_GC64
  } else if (irt_islightud(kt)) {
    emit_rmro(as, XO_CMP, key|REX_64, dest, offsetof(Node, key.u64));
#endif
#if LJ_GC64
  } else if (irt_isaddr(kt)) {
    if (isk) {
      TValue k;
      k.u64 = ((uint64_t)irt_toitype(irkey->t) << 47) | irkey[1].tv.u64;
      emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.lo),
                 k.u32.lo);
      emit_sjcc(as, CC_NE, l_next);
      emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.u32.hi),
                 k.u32.hi);
    } else {
      emit_rmro(as, XO_CMP, tmp|REX_64, dest, offsetof(Node, key.u64));
    }
  } else {
    lj_assertA(irt_ispri(kt) && !irt_isnil(kt), "bad HREF key type");
    emit_u32(as, (irt_toitype(kt)<<15)|0x7fff);
    emit_rmro(as, XO_ARITHi, XOg_CMP, dest, offsetof(Node, key.it));
#else
  } else {
    if (!irt_ispri(kt)) {
      lj_assertA(irt_isaddr(kt), "bad HREF key type");
      if (isk)
        emit_gmroi(as, XG_ARITHi(XOg_CMP), dest, offsetof(Node, key.gcr),
                   ptr2addr(ir_kgc(irkey)));
      else
        emit_rmro(as, XO_CMP, key, dest, offsetof(Node, key.gcr));
      emit_sjcc(as, CC_NE, l_next);
    }
    lj_assertA(!irt_isnil(kt), "bad HREF key type");
    emit_i8(as, irt_toitype(kt));
    emit_rmro(as, XO_ARITHi8, XOg_CMP, dest, offsetof(Node, key.it));
#endif
  }
  emit_sfixup(as, l_loop);
  checkmclim(as);
#if LJ_GC64
  if (!isk && irt_isaddr(kt)) {
    emit_rr(as, XO_OR, tmp|REX_64, key);
    emit_loadu64(as, tmp, (uint64_t)irt_toitype(kt) << 47);
  }
#endif

  /* Load main position relative to tab->node into dest. */
  khash = isk ? ir_khash(as, irkey) : 1;
  if (khash == 0) {
    emit_rmro(as, XO_MOV, dest|REX_GC64, tab, offsetof(GCtab, node));
  } else {
    emit_rmro(as, XO_ARITH(XOg_ADD), dest|REX_GC64, tab, offsetof(GCtab,node));
    emit_shifti(as, XOg_SHL, dest, 3);
    emit_rmrxo(as, XO_LEA, dest, dest, dest, XM_SCALE2, 0);
    if (isk) {
      emit_gri(as, XG_ARITHi(XOg_AND), dest, (int32_t)khash);
      emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
    } else if (irt_isstr(kt)) {
      emit_rmro(as, XO_ARITH(XOg_AND), dest, key, offsetof(GCstr, sid));
      emit_rmro(as, XO_MOV, dest, tab, offsetof(GCtab, hmask));
    } else {  /* Must match with hashrot() in lj_tab.c. */
      emit_rmro(as, XO_ARITH(XOg_AND), dest, tab, offsetof(GCtab, hmask));
      emit_rr(as, XO_ARITH(XOg_SUB), dest, tmp);
      emit_shifti(as, XOg_ROL, tmp, HASH_ROT3);
      emit_rr(as, XO_ARITH(XOg_XOR), dest, tmp);
      emit_shifti(as, XOg_ROL, dest, HASH_ROT2);
      emit_rr(as, XO_ARITH(XOg_SUB), tmp, dest);
      emit_shifti(as, XOg_ROL, dest, HASH_ROT1);
      emit_rr(as, XO_ARITH(XOg_XOR), tmp, dest);
      if (irt_isnum(kt)) {
        emit_rr(as, XO_ARITH(XOg_ADD), dest, dest);
#if LJ_64
        emit_shifti(as, XOg_SHR|REX_64, dest, 32);
        emit_rr(as, XO_MOV, tmp, dest);
        emit_rr(as, XO_MOVDto, key|REX_64, dest);
#else
        emit_rmro(as, XO_MOV, dest, RID_ESP, ra_spill(as, irkey)+4);
        emit_rr(as, XO_MOVDto, key, tmp);
#endif
      } else {
        emit_rr(as, XO_MOV, tmp, key);
#if LJ_GC64
        checkmclim(as);
        emit_gri(as, XG_ARITHi(XOg_XOR), dest, irt_toitype(kt) << 15);
        if ((as->flags & JIT_F_BMI2)) {
          emit_i8(as, 32);
          emit_mrm(as, XV_RORX|VEX_64, dest, key);
        } else {
          emit_shifti(as, XOg_SHR|REX_64, dest, 32);
          emit_rr(as, XO_MOV, dest|REX_64, key|REX_64);
        }
#else
        emit_rmro(as, XO_LEA, dest, key, HASH_BIAS);
#endif
      }
    }
  }
}
```
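The emitter runs backwards, so the ROL/XOR/SUB sequence above is listed in reverse of its execution order; executed forward it performs the same mixing as hashrot() in lj_tab.c, which the comment says it must match. A C sketch of that mixer (with `tmp` as `lo` and `dest` as `hi`; `lj_rol` is a 32 bit rotate-left and the `HASH_ROT*` constants come from lj_tab.h):

```c
/* C view of the hash mixing produced by the backwards emitter above
** (mirrors hashrot() in lj_tab.c).
*/
static uint32_t hashrot_sketch(uint32_t lo, uint32_t hi)
{
  lo ^= hi; hi = lj_rol(hi, HASH_ROT1);
  lo -= hi; hi = lj_rol(hi, HASH_ROT2);
  hi ^= lo; hi -= lj_rol(lo, HASH_ROT3);
  return hi;
}
```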
```c
static void asm_hrefk(ASMState *as, IRIns *ir)
{
  IRIns *kslot = IR(ir->op2);
  IRIns *irkey = IR(kslot->op1);
  int32_t ofs = (int32_t)(kslot->op2 * sizeof(Node));
  Reg dest = ra_used(ir) ? ra_dest(as, ir, RSET_GPR) : RID_NONE;
  Reg node = ra_alloc1(as, ir->op1, RSET_GPR);
#if !LJ_64
  MCLabel l_exit;
#endif
  lj_assertA(ofs % sizeof(Node) == 0, "unaligned HREFK slot");
  if (ra_hasreg(dest)) {
    if (ofs != 0) {
      if (dest == node)
        emit_gri(as, XG_ARITHi(XOg_ADD), dest|REX_GC64, ofs);
      else
        emit_rmro(as, XO_LEA, dest|REX_GC64, node, ofs);
    } else if (dest != node) {
      emit_rr(as, XO_MOV, dest|REX_GC64, node);
    }
  }
  asm_guardcc(as, CC_NE);
#if LJ_64
  if (!irt_ispri(irkey->t)) {
    Reg key = ra_scratch(as, rset_exclude(RSET_GPR, node));
    emit_rmro(as, XO_CMP, key|REX_64, node,
              ofs + (int32_t)offsetof(Node, key.u64));
    lj_assertA(irt_isnum(irkey->t) || irt_isgcv(irkey->t),
               "bad HREFK key type");
    /* Assumes -0.0 is already canonicalized to +0.0. */
    emit_loadu64(as, key, irt_isnum(irkey->t) ? ir_knum(irkey)->u64 :
#if LJ_GC64
                          ((uint64_t)irt_toitype(irkey->t) << 47) |
                          (uint64_t)ir_kgc(irkey));
#else
                          ((uint64_t)irt_toitype(irkey->t) << 32) |
                          (uint64_t)(uint32_t)ptr2addr(ir_kgc(irkey)));
#endif
  } else {
    lj_assertA(!irt_isnil(irkey->t), "bad HREFK key type");
#if LJ_GC64
    emit_i32(as, (irt_toitype(irkey->t)<<15)|0x7fff);
    emit_rmro(as, XO_ARITHi, XOg_CMP, node,
              ofs + (int32_t)offsetof(Node, key.it));
#else
    emit_i8(as, irt_toitype(irkey->t));
    emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
              ofs + (int32_t)offsetof(Node, key.it));
#endif
  }
#else
  l_exit = emit_label(as);
  if (irt_isnum(irkey->t)) {
    /* Assumes -0.0 is already canonicalized to +0.0. */
    emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
               ofs + (int32_t)offsetof(Node, key.u32.lo),
               (int32_t)ir_knum(irkey)->u32.lo);
    emit_sjcc(as, CC_NE, l_exit);
    emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
               ofs + (int32_t)offsetof(Node, key.u32.hi),
               (int32_t)ir_knum(irkey)->u32.hi);
  } else {
    if (!irt_ispri(irkey->t)) {
      lj_assertA(irt_isgcv(irkey->t), "bad HREFK key type");
      emit_gmroi(as, XG_ARITHi(XOg_CMP), node,
                 ofs + (int32_t)offsetof(Node, key.gcr),
                 ptr2addr(ir_kgc(irkey)));
      emit_sjcc(as, CC_NE, l_exit);
    }
    lj_assertA(!irt_isnil(irkey->t), "bad HREFK key type");
    emit_i8(as, irt_toitype(irkey->t));
    emit_rmro(as, XO_ARITHi8, XOg_CMP, node,
              ofs + (int32_t)offsetof(Node, key.it));
  }
#endif
}

static void asm_uref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  if (irref_isk(ir->op1)) {
    GCfunc *fn = ir_kfunc(IR(ir->op1));
    MRef *v = &gcref(fn->l.uvptr[(ir->op2 >> 8)])->uv.v;
    emit_rma(as, XO_MOV, dest|REX_GC64, v);
  } else {
    Reg uv = ra_scratch(as, RSET_GPR);
    Reg func = ra_alloc1(as, ir->op1, RSET_GPR);
    if (ir->o == IR_UREFC) {
      emit_rmro(as, XO_LEA, dest|REX_GC64, uv, offsetof(GCupval, tv));
      asm_guardcc(as, CC_NE);
      emit_i8(as, 1);
      emit_rmro(as, XO_ARITHib, XOg_CMP, uv, offsetof(GCupval, closed));
    } else {
      emit_rmro(as, XO_MOV, dest|REX_GC64, uv, offsetof(GCupval, v));
    }
    emit_rmro(as, XO_MOV, uv|REX_GC64, func,
              (int32_t)offsetof(GCfuncL, uvptr) +
              (int32_t)sizeof(MRef) * (int32_t)(ir->op2 >> 8));
  }
}

static void asm_fref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusefref(as, ir, RSET_GPR);
  emit_mrm(as, XO_LEA, dest, RID_MRM);
}

static void asm_strref(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, RSET_GPR);
  asm_fusestrref(as, ir, RSET_GPR);
  if (as->mrm.base == RID_NONE)
    emit_loadi(as, dest, as->mrm.ofs);
  else if (as->mrm.base == dest && as->mrm.idx == RID_NONE)
    emit_gri(as, XG_ARITHi(XOg_ADD), dest|REX_GC64, as->mrm.ofs);
  else
    emit_mrm(as, XO_LEA, dest|REX_GC64, RID_MRM);
}

/* -- Loads and stores ---------------------------------------------------- */

static void asm_fxload(ASMState *as, IRIns *ir)
{
  Reg dest = ra_dest(as, ir, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR);
  x86Op xo;
  if (ir->o == IR_FLOAD)
    asm_fusefref(as, ir, RSET_GPR);
  else
    asm_fusexref(as, ir->op1, RSET_GPR);
  /* ir->op2 is ignored -- unaligned loads are ok on x86. */
  switch (irt_type(ir->t)) {
  case IRT_I8: xo = XO_MOVSXb; break;
  case IRT_U8: xo = XO_MOVZXb; break;
  case IRT_I16: xo = XO_MOVSXw; break;
  case IRT_U16: xo = XO_MOVZXw; break;
  case IRT_NUM: xo = XO_MOVSD; break;
  case IRT_FLOAT: xo = XO_MOVSS; break;
  default:
    if (LJ_64 && irt_is64(ir->t))
      dest |= REX_64;
    else
      lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t),
                 "unsplit 64 bit load");
    xo = XO_MOV;
    break;
  }
  emit_mrm(as, xo, dest, RID_MRM);
}

#define asm_fload(as, ir)	asm_fxload(as, ir)
#define asm_xload(as, ir)	asm_fxload(as, ir)

static void asm_fxstore(ASMState *as, IRIns *ir)
{
  RegSet allow = RSET_GPR;
  Reg src = RID_NONE, osrc = RID_NONE;
  int32_t k = 0;
  if (ir->r == RID_SINK)
    return;
  /* The IRT_I16/IRT_U16 stores should never be simplified for constant
  ** values since mov word [mem], imm16 has a length-changing prefix.
  */
  if (irt_isi16(ir->t) || irt_isu16(ir->t) || irt_isfp(ir->t) ||
      !asm_isk32(as, ir->op2, &k)) {
    RegSet allow8 = irt_isfp(ir->t) ? RSET_FPR :
                    (irt_isi8(ir->t) || irt_isu8(ir->t)) ? RSET_GPR8 : RSET_GPR;
    src = osrc = ra_alloc1(as, ir->op2, allow8);
    if (!LJ_64 && !rset_test(allow8, src)) {  /* Already in wrong register. */
      rset_clear(allow, osrc);
      src = ra_scratch(as, allow8);
    }
    rset_clear(allow, src);
  }
  if (ir->o == IR_FSTORE) {
    asm_fusefref(as, IR(ir->op1), allow);
  } else {
    asm_fusexref(as, ir->op1, allow);
    if (LJ_32 && ir->o == IR_HIOP) as->mrm.ofs += 4;
  }
```
| 1475 if (ra_hasreg(src)) { | |
| 1476 x86Op xo; | |
| 1477 switch (irt_type(ir->t)) { | |
| 1478 case IRT_I8: case IRT_U8: xo = XO_MOVtob; src |= FORCE_REX; break; | |
| 1479 case IRT_I16: case IRT_U16: xo = XO_MOVtow; break; | |
| 1480 case IRT_NUM: xo = XO_MOVSDto; break; | |
| 1481 case IRT_FLOAT: xo = XO_MOVSSto; break; | |
| 1482 #if LJ_64 && !LJ_GC64 | |
| 1483 case IRT_LIGHTUD: | |
| 1484 /* NYI: mask 64 bit lightuserdata. */ | |
| 1485 lj_assertA(0, "store of lightuserdata"); | |
| 1486 #endif | |
| 1487 default: | |
| 1488 if (LJ_64 && irt_is64(ir->t)) | |
| 1489 src |= REX_64; | |
| 1490 else | |
| 1491 lj_assertA(irt_isint(ir->t) || irt_isu32(ir->t) || irt_isaddr(ir->t), | |
| 1492 "unsplit 64 bit store"); | |
| 1493 xo = XO_MOVto; | |
| 1494 break; | |
| 1495 } | |
| 1496 emit_mrm(as, xo, src, RID_MRM); | |
| 1497 if (!LJ_64 && src != osrc) { | |
| 1498 ra_noweak(as, osrc); | |
| 1499 emit_rr(as, XO_MOV, src, osrc); | |
| 1500 } | |
| 1501 } else { | |
| 1502 if (irt_isi8(ir->t) || irt_isu8(ir->t)) { | |
| 1503 emit_i8(as, k); | |
| 1504 emit_mrm(as, XO_MOVmib, 0, RID_MRM); | |
| 1505 } else { | |
| 1506 lj_assertA(irt_is64(ir->t) || irt_isint(ir->t) || irt_isu32(ir->t) || | |
| 1507 irt_isaddr(ir->t), "bad store type"); | |
| 1508 emit_i32(as, k); | |
| 1509 emit_mrm(as, XO_MOVmi, REX_64IR(ir, 0), RID_MRM); | |
| 1510 } | |
| 1511 } | |
| 1512 } | |
| 1513 | |
| 1514 #define asm_fstore(as, ir) asm_fxstore(as, ir) | |
| 1515 #define asm_xstore(as, ir) asm_fxstore(as, ir) | |
| 1516 | |
| 1517 #if LJ_64 && !LJ_GC64 | |
| 1518 static Reg asm_load_lightud64(ASMState *as, IRIns *ir, int typecheck) | |
| 1519 { | |
| 1520 if (ra_used(ir) || typecheck) { | |
| 1521 Reg dest = ra_dest(as, ir, RSET_GPR); | |
| 1522 if (typecheck) { | |
| 1523 Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, dest)); | |
| 1524 asm_guardcc(as, CC_NE); | |
| 1525 emit_i8(as, -2); | |
| 1526 emit_rr(as, XO_ARITHi8, XOg_CMP, tmp); | |
| 1527 emit_shifti(as, XOg_SAR|REX_64, tmp, 47); | |
| 1528 emit_rr(as, XO_MOV, tmp|REX_64, dest); | |
| 1529 } | |
| 1530 return dest; | |
| 1531 } else { | |
| 1532 return RID_NONE; | |
| 1533 } | |
| 1534 } | |
| 1535 #endif | |
| 1536 | |
| 1537 static void asm_ahuvload(ASMState *as, IRIns *ir) | |
| 1538 { | |
| 1539 #if LJ_GC64 | |
| 1540 Reg tmp = RID_NONE; | |
| 1541 #endif | |
| 1542 lj_assertA(irt_isnum(ir->t) || irt_ispri(ir->t) || irt_isaddr(ir->t) || | |
| 1543 (LJ_DUALNUM && irt_isint(ir->t)), | |
| 1544 "bad load type %d", irt_type(ir->t)); | |
| 1545 #if LJ_64 && !LJ_GC64 | |
| 1546 if (irt_islightud(ir->t)) { | |
| 1547 Reg dest = asm_load_lightud64(as, ir, 1); | |
| 1548 if (ra_hasreg(dest)) { | |
| 1549 asm_fuseahuref(as, ir->op1, RSET_GPR); | |
| 1550 if (ir->o == IR_VLOAD) as->mrm.ofs += 8 * ir->op2; | |
| 1551 emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM); | |
| 1552 } | |
| 1553 return; | |
| 1554 } else | |
| 1555 #endif | |
| 1556 if (ra_used(ir)) { | |
| 1557 RegSet allow = irt_isnum(ir->t) ? RSET_FPR : RSET_GPR; | |
| 1558 Reg dest = ra_dest(as, ir, allow); | |
| 1559 asm_fuseahuref(as, ir->op1, RSET_GPR); | |
| 1560 if (ir->o == IR_VLOAD) as->mrm.ofs += 8 * ir->op2; | |
| 1561 #if LJ_GC64 | |
| 1562 if (irt_isaddr(ir->t)) { | |
| 1563 emit_shifti(as, XOg_SHR|REX_64, dest, 17); | |
| 1564 asm_guardcc(as, CC_NE); | |
| 1565 emit_i8(as, irt_toitype(ir->t)); | |
| 1566 emit_rr(as, XO_ARITHi8, XOg_CMP, dest); | |
| 1567 emit_i8(as, XI_O16); | |
| 1568 if ((as->flags & JIT_F_BMI2)) { | |
| 1569 emit_i8(as, 47); | |
| 1570 emit_mrm(as, XV_RORX|VEX_64, dest, RID_MRM); | |
| 1571 } else { | |
| 1572 emit_shifti(as, XOg_ROR|REX_64, dest, 47); | |
| 1573 emit_mrm(as, XO_MOV, dest|REX_64, RID_MRM); | |
| 1574 } | |
| 1575 return; | |
| 1576 } else | |
| 1577 #endif | |
| 1578 emit_mrm(as, dest < RID_MAX_GPR ? XO_MOV : XO_MOVSD, dest, RID_MRM); | |
| 1579 } else { | |
| 1580 RegSet gpr = RSET_GPR; | |
| 1581 #if LJ_GC64 | |
| 1582 if (irt_isaddr(ir->t)) { | |
| 1583 tmp = ra_scratch(as, RSET_GPR); | |
| 1584 gpr = rset_exclude(gpr, tmp); | |
| 1585 } | |
| 1586 #endif | |
| 1587 asm_fuseahuref(as, ir->op1, gpr); | |
| 1588 if (ir->o == IR_VLOAD) as->mrm.ofs += 8 * ir->op2; | |
| 1589 } | |
| 1590 /* Always do the type check, even if the load result is unused. */ | |
| 1591 as->mrm.ofs += 4; | |
| 1592 asm_guardcc(as, irt_isnum(ir->t) ? CC_AE : CC_NE); | |
| 1593 if (LJ_64 && irt_type(ir->t) >= IRT_NUM) { | |
| 1594 lj_assertA(irt_isinteger(ir->t) || irt_isnum(ir->t), | |
| 1595 "bad load type %d", irt_type(ir->t)); | |
| 1596 #if LJ_GC64 | |
| 1597 emit_u32(as, LJ_TISNUM << 15); | |
| 1598 #else | |
| 1599 emit_u32(as, LJ_TISNUM); | |
| 1600 #endif | |
| 1601 emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM); | |
| 1602 #if LJ_GC64 | |
| 1603 } else if (irt_isaddr(ir->t)) { | |
| 1604 as->mrm.ofs -= 4; | |
| 1605 emit_i8(as, irt_toitype(ir->t)); | |
| 1606 emit_mrm(as, XO_ARITHi8, XOg_CMP, tmp); | |
| 1607 emit_shifti(as, XOg_SAR|REX_64, tmp, 47); | |
| 1608 emit_mrm(as, XO_MOV, tmp|REX_64, RID_MRM); | |
| 1609 } else if (irt_isnil(ir->t)) { | |
| 1610 as->mrm.ofs -= 4; | |
| 1611 emit_i8(as, -1); | |
| 1612 emit_mrm(as, XO_ARITHi8, XOg_CMP|REX_64, RID_MRM); | |
| 1613 } else { | |
| 1614 emit_u32(as, (irt_toitype(ir->t) << 15) | 0x7fff); | |
| 1615 emit_mrm(as, XO_ARITHi, XOg_CMP, RID_MRM); | |
| 1616 #else | |
| 1617 } else { | |
| 1618 emit_i8(as, irt_toitype(ir->t)); | |
| 1619 emit_mrm(as, XO_ARITHi8, XOg_CMP, RID_MRM); | |
| 1620 #endif | |
| 1621 } | |
| 1622 } | |
| 1623 | |
| 1624 static void asm_ahustore(ASMState *as, IRIns *ir) | |
| 1625 { | |
| 1626 if (ir->r == RID_SINK) | |
| 1627 return; | |
| 1628 if (irt_isnum(ir->t)) { | |
| 1629 Reg src = ra_alloc1(as, ir->op2, RSET_FPR); | |
| 1630 asm_fuseahuref(as, ir->op1, RSET_GPR); | |
| 1631 emit_mrm(as, XO_MOVSDto, src, RID_MRM); | |
| 1632 #if LJ_64 && !LJ_GC64 | |
| 1633 } else if (irt_islightud(ir->t)) { | |
| 1634 Reg src = ra_alloc1(as, ir->op2, RSET_GPR); | |
| 1635 asm_fuseahuref(as, ir->op1, rset_exclude(RSET_GPR, src)); | |
| 1636 emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM); | |
| 1637 #endif | |
| 1638 #if LJ_GC64 | |
| 1639 } else if (irref_isk(ir->op2)) { | |
| 1640 TValue k; | |
| 1641 lj_ir_kvalue(as->J->L, &k, IR(ir->op2)); | |
| 1642 asm_fuseahuref(as, ir->op1, RSET_GPR); | |
| 1643 if (tvisnil(&k)) { | |
| 1644 emit_i32(as, -1); | |
| 1645 emit_mrm(as, XO_MOVmi, REX_64, RID_MRM); | |
| 1646 } else { | |
| 1647 emit_u32(as, k.u32.lo); | |
| 1648 emit_mrm(as, XO_MOVmi, 0, RID_MRM); | |
| 1649 as->mrm.ofs += 4; | |
| 1650 emit_u32(as, k.u32.hi); | |
| 1651 emit_mrm(as, XO_MOVmi, 0, RID_MRM); | |
| 1652 } | |
| 1653 #endif | |
| 1654 } else { | |
| 1655 IRIns *irr = IR(ir->op2); | |
| 1656 RegSet allow = RSET_GPR; | |
| 1657 Reg src = RID_NONE; | |
| 1658 if (!irref_isk(ir->op2)) { | |
| 1659 src = ra_alloc1(as, ir->op2, allow); | |
| 1660 rset_clear(allow, src); | |
| 1661 } | |
| 1662 asm_fuseahuref(as, ir->op1, allow); | |
| 1663 if (ra_hasreg(src)) { | |
| 1664 #if LJ_GC64 | |
| 1665 if (!(LJ_DUALNUM && irt_isinteger(ir->t))) { | |
| 1666 /* TODO: 64 bit store + 32 bit load-modify-store is suboptimal. */ | |
| 1667 as->mrm.ofs += 4; | |
| 1668 emit_u32(as, irt_toitype(ir->t) << 15); | |
| 1669 emit_mrm(as, XO_ARITHi, XOg_OR, RID_MRM); | |
| 1670 as->mrm.ofs -= 4; | |
| 1671 emit_mrm(as, XO_MOVto, src|REX_64, RID_MRM); | |
| 1672 return; | |
| 1673 } | |
| 1674 #endif | |
| 1675 emit_mrm(as, XO_MOVto, src, RID_MRM); | |
| 1676 } else if (!irt_ispri(irr->t)) { | |
| 1677 lj_assertA(irt_isaddr(ir->t) || (LJ_DUALNUM && irt_isinteger(ir->t)), | |
| 1678 "bad store type"); | |
| 1679 emit_i32(as, irr->i); | |
| 1680 emit_mrm(as, XO_MOVmi, 0, RID_MRM); | |
| 1681 } | |
| 1682 as->mrm.ofs += 4; | |
| 1683 #if LJ_GC64 | |
| 1684 lj_assertA(LJ_DUALNUM && irt_isinteger(ir->t), "bad store type"); | |
| 1685 emit_i32(as, LJ_TNUMX << 15); | |
| 1686 #else | |
| 1687 emit_i32(as, (int32_t)irt_toitype(ir->t)); | |
| 1688 #endif | |
| 1689 emit_mrm(as, XO_MOVmi, 0, RID_MRM); | |
| 1690 } | |
| 1691 } | |
| 1692 | |
| 1693 static void asm_sload(ASMState *as, IRIns *ir) | |
| 1694 { | |
| 1695 int32_t ofs = 8*((int32_t)ir->op1-1-LJ_FR2) + | |
| 1696 (!LJ_FR2 && (ir->op2 & IRSLOAD_FRAME) ? 4 : 0); | |
| 1697 IRType1 t = ir->t; | |
| 1698 Reg base; | |
| 1699 lj_assertA(!(ir->op2 & IRSLOAD_PARENT), | |
| 1700 "bad parent SLOAD"); /* Handled by asm_head_side(). */ | |
| 1701 lj_assertA(irt_isguard(t) || !(ir->op2 & IRSLOAD_TYPECHECK), | |
| 1702 "inconsistent SLOAD variant"); | |
| 1703 lj_assertA(LJ_DUALNUM || | |
| 1704 !irt_isint(t) || | |
| 1705 (ir->op2 & (IRSLOAD_CONVERT|IRSLOAD_FRAME|IRSLOAD_KEYINDEX)), | |
| 1706 "bad SLOAD type"); | |
| 1707 if ((ir->op2 & IRSLOAD_CONVERT) && irt_isguard(t) && irt_isint(t)) { | |
| 1708 Reg left = ra_scratch(as, RSET_FPR); | |
| 1709 asm_tointg(as, ir, left); /* Frees dest reg. Do this before base alloc. */ | |
| 1710 base = ra_alloc1(as, REF_BASE, RSET_GPR); | |
| 1711 emit_rmro(as, XO_MOVSD, left, base, ofs); | |
| 1712 t.irt = IRT_NUM; /* Continue with a regular number type check. */ | |
| 1713 #if LJ_64 && !LJ_GC64 | |
| 1714 } else if (irt_islightud(t)) { | |
| 1715 Reg dest = asm_load_lightud64(as, ir, (ir->op2 & IRSLOAD_TYPECHECK)); | |
| 1716 if (ra_hasreg(dest)) { | |
| 1717 base = ra_alloc1(as, REF_BASE, RSET_GPR); | |
| 1718 emit_rmro(as, XO_MOV, dest|REX_64, base, ofs); | |
| 1719 } | |
| 1720 return; | |
| 1721 #endif | |
| 1722 } else if (ra_used(ir)) { | |
| 1723 RegSet allow = irt_isnum(t) ? RSET_FPR : RSET_GPR; | |
| 1724 Reg dest = ra_dest(as, ir, allow); | |
| 1725 base = ra_alloc1(as, REF_BASE, RSET_GPR); | |
| 1726 lj_assertA(irt_isnum(t) || irt_isint(t) || irt_isaddr(t), | |
| 1727 "bad SLOAD type %d", irt_type(t)); | |
| 1728 if ((ir->op2 & IRSLOAD_CONVERT)) { | |
| 1729 t.irt = irt_isint(t) ? IRT_NUM : IRT_INT; /* Check for original type. */ | |
| 1730 emit_rmro(as, irt_isint(t) ? XO_CVTSI2SD : XO_CVTTSD2SI, dest, base, ofs); | |
| 1731 } else { | |
| 1732 #if LJ_GC64 | |
| 1733 if (irt_isaddr(t)) { | |
| 1734 /* LJ_GC64 type check + tag removal without BMI2 and with BMI2: | |
| 1735 ** | |
| 1736 ** mov r64, [addr] rorx r64, [addr], 47 | |
| 1737 ** ror r64, 47 | |
| 1738 ** cmp r16, itype cmp r16, itype | |
| 1739 ** jne ->exit jne ->exit | |
| 1740 ** shr r64, 16 shr r64, 16 | |
| 1741 */ | |
| 1742 emit_shifti(as, XOg_SHR|REX_64, dest, 17); | |
| 1743 if ((ir->op2 & IRSLOAD_TYPECHECK)) { | |
| 1744 asm_guardcc(as, CC_NE); | |
| 1745 emit_i8(as, irt_toitype(t)); | |
| 1746 emit_rr(as, XO_ARITHi8, XOg_CMP, dest); | |
| 1747 emit_i8(as, XI_O16); | |
| 1748 } | |
| 1749 if ((as->flags & JIT_F_BMI2)) { | |
| 1750 emit_i8(as, 47); | |
| 1751 emit_rmro(as, XV_RORX|VEX_64, dest, base, ofs); | |
| 1752 } else { | |
| 1753 if ((ir->op2 & IRSLOAD_TYPECHECK)) | |
| 1754 emit_shifti(as, XOg_ROR|REX_64, dest, 47); | |
| 1755 else | |
| 1756 emit_shifti(as, XOg_SHL|REX_64, dest, 17); | |
| 1757 emit_rmro(as, XO_MOV, dest|REX_64, base, ofs); | |
| 1758 } | |
| 1759 return; | |
| 1760 } else | |
| 1761 #endif | |
| 1762 emit_rmro(as, irt_isnum(t) ? XO_MOVSD : XO_MOV, dest, base, ofs); | |
| 1763 } | |
| 1764 } else { | |
| 1765 if (!(ir->op2 & IRSLOAD_TYPECHECK)) | |
| 1766 return; /* No type check: avoid base alloc. */ | |
| 1767 base = ra_alloc1(as, REF_BASE, RSET_GPR); | |
| 1768 } | |
| 1769 if ((ir->op2 & IRSLOAD_TYPECHECK)) { | |
| 1770 /* Need type check, even if the load result is unused. */ | |
| 1771 asm_guardcc(as, irt_isnum(t) ? CC_AE : CC_NE); | |
| 1772 if ((LJ_64 && irt_type(t) >= IRT_NUM) || (ir->op2 & IRSLOAD_KEYINDEX)) { | |
| 1773 lj_assertA(irt_isinteger(t) || irt_isnum(t), | |
| 1774 "bad SLOAD type %d", irt_type(t)); | |
| 1775 emit_u32(as, (ir->op2 & IRSLOAD_KEYINDEX) ? LJ_KEYINDEX : | |
| 1776 LJ_GC64 ? (LJ_TISNUM << 15) : LJ_TISNUM); | |
| 1777 emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4); | |
| 1778 #if LJ_GC64 | |
| 1779 } else if (irt_isnil(t)) { | |
| 1780 /* LJ_GC64 type check for nil: | |
| 1781 ** | |
| 1782 ** cmp qword [addr], -1 | |
| 1783 ** jne ->exit | |
| 1784 */ | |
| 1785 emit_i8(as, -1); | |
| 1786 emit_rmro(as, XO_ARITHi8, XOg_CMP|REX_64, base, ofs); | |
| 1787 } else if (irt_ispri(t)) { | |
| 1788 emit_u32(as, (irt_toitype(t) << 15) | 0x7fff); | |
| 1789 emit_rmro(as, XO_ARITHi, XOg_CMP, base, ofs+4); | |
| 1790 } else { | |
| 1791 /* LJ_GC64 type check only: | |
| 1792 ** | |
| 1793 ** mov r64, [addr] | |
| 1794 ** sar r64, 47 | |
| 1795 ** cmp r32, itype | |
| 1796 ** jne ->exit | |
| 1797 */ | |
| 1798 Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, base)); | |
| 1799 emit_i8(as, irt_toitype(t)); | |
| 1800 emit_rr(as, XO_ARITHi8, XOg_CMP, tmp); | |
| 1801 emit_shifti(as, XOg_SAR|REX_64, tmp, 47); | |
| 1802 emit_rmro(as, XO_MOV, tmp|REX_64, base, ofs); | |
| 1803 #else | |
| 1804 } else { | |
| 1805 emit_i8(as, irt_toitype(t)); | |
| 1806 emit_rmro(as, XO_ARITHi8, XOg_CMP, base, ofs+4); | |
| 1807 #endif | |
| 1808 } | |
| 1809 } | |
| 1810 } | |
| 1811 | |
| 1812 /* -- Allocations --------------------------------------------------------- */ | |
| 1813 | |
| 1814 #if LJ_HASFFI | |
| 1815 static void asm_cnew(ASMState *as, IRIns *ir) | |
| 1816 { | |
| 1817 CTState *cts = ctype_ctsG(J2G(as->J)); | |
| 1818 CTypeID id = (CTypeID)IR(ir->op1)->i; | |
| 1819 CTSize sz; | |
| 1820 CTInfo info = lj_ctype_info(cts, id, &sz); | |
| 1821 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_mem_newgco]; | |
| 1822 IRRef args[4]; | |
| 1823 lj_assertA(sz != CTSIZE_INVALID || (ir->o == IR_CNEW && ir->op2 != REF_NIL), | |
| 1824 "bad CNEW/CNEWI operands"); | |
| 1825 | |
| 1826 as->gcsteps++; | |
| 1827 asm_setupresult(as, ir, ci); /* GCcdata * */ | |
| 1828 | |
| 1829 /* Initialize immutable cdata object. */ | |
| 1830 if (ir->o == IR_CNEWI) { | |
| 1831 RegSet allow = (RSET_GPR & ~RSET_SCRATCH); | |
| 1832 #if LJ_64 | |
| 1833 Reg r64 = sz == 8 ? REX_64 : 0; | |
| 1834 if (irref_isk(ir->op2)) { | |
| 1835 IRIns *irk = IR(ir->op2); | |
| 1836 uint64_t k = (irk->o == IR_KINT64 || | |
| 1837 (LJ_GC64 && (irk->o == IR_KPTR || irk->o == IR_KKPTR))) ? | |
| 1838 ir_k64(irk)->u64 : (uint64_t)(uint32_t)irk->i; | |
| 1839 if (sz == 4 || checki32((int64_t)k)) { | |
| 1840 emit_i32(as, (int32_t)k); | |
| 1841 emit_rmro(as, XO_MOVmi, r64, RID_RET, sizeof(GCcdata)); | |
| 1842 } else { | |
| 1843 emit_movtomro(as, RID_ECX + r64, RID_RET, sizeof(GCcdata)); | |
| 1844 emit_loadu64(as, RID_ECX, k); | |
| 1845 } | |
| 1846 } else { | |
| 1847 Reg r = ra_alloc1(as, ir->op2, allow); | |
| 1848 emit_movtomro(as, r + r64, RID_RET, sizeof(GCcdata)); | |
| 1849 } | |
| 1850 #else | |
| 1851 int32_t ofs = sizeof(GCcdata); | |
| 1852 if (sz == 8) { | |
| 1853 ofs += 4; ir++; | |
| 1854 lj_assertA(ir->o == IR_HIOP, "missing CNEWI HIOP"); | |
| 1855 } | |
| 1856 do { | |
| 1857 if (irref_isk(ir->op2)) { | |
| 1858 emit_movmroi(as, RID_RET, ofs, IR(ir->op2)->i); | |
| 1859 } else { | |
| 1860 Reg r = ra_alloc1(as, ir->op2, allow); | |
| 1861 emit_movtomro(as, r, RID_RET, ofs); | |
| 1862 rset_clear(allow, r); | |
| 1863 } | |
| 1864 if (ofs == sizeof(GCcdata)) break; | |
| 1865 ofs -= 4; ir--; | |
| 1866 } while (1); | |
| 1867 #endif | |
| 1868 lj_assertA(sz == 4 || sz == 8, "bad CNEWI size %d", sz); | |
| 1869 } else if (ir->op2 != REF_NIL) { /* Create VLA/VLS/aligned cdata. */ | |
| 1870 ci = &lj_ir_callinfo[IRCALL_lj_cdata_newv]; | |
| 1871 args[0] = ASMREF_L; /* lua_State *L */ | |
| 1872 args[1] = ir->op1; /* CTypeID id */ | |
| 1873 args[2] = ir->op2; /* CTSize sz */ | |
| 1874 args[3] = ASMREF_TMP1; /* CTSize align */ | |
| 1875 asm_gencall(as, ci, args); | |
| 1876 emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)ctype_align(info)); | |
| 1877 return; | |
| 1878 } | |
| 1879 | |
| 1880 /* Combine initialization of marked, gct and ctypeid. */ | |
| 1881 emit_movtomro(as, RID_ECX, RID_RET, offsetof(GCcdata, marked)); | |
| 1882 emit_gri(as, XG_ARITHi(XOg_OR), RID_ECX, | |
| 1883 (int32_t)((~LJ_TCDATA<<8)+(id<<16))); | |
| 1884 emit_gri(as, XG_ARITHi(XOg_AND), RID_ECX, LJ_GC_WHITES); | |
| 1885 emit_opgl(as, XO_MOVZXb, RID_ECX, gc.currentwhite); | |
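| /* Editor's note: the 32 bit store above covers the adjacent header fields | |
| ** in one go -- marked (byte 0), gct (byte 1, stored as the complement | |
| ** ~LJ_TCDATA) and the 16 bit ctypeid (bytes 2-3). Since code is emitted | |
| ** backwards, the MOVZX of gc.currentwhite executes first, then the AND | |
| ** with LJ_GC_WHITES, the OR merging gct/ctypeid, and finally the store. | |
| */ | |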
| 1886 | |
| 1887 args[0] = ASMREF_L; /* lua_State *L */ | |
| 1888 args[1] = ASMREF_TMP1; /* MSize size */ | |
| 1889 asm_gencall(as, ci, args); | |
| 1890 emit_loadi(as, ra_releasetmp(as, ASMREF_TMP1), (int32_t)(sz+sizeof(GCcdata))); | |
| 1891 } | |
| 1892 #endif | |
| 1893 | |
| 1894 /* -- Write barriers ------------------------------------------------------ */ | |
| 1895 | |
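| /* Editor's note: this is the backward barrier for tables. In execution | |
| ** order it tests whether the table is black; if so, it clears the black | |
| ** bit and links the table into g->gc.grayagain, so the GC rescans the | |
| ** whole table instead of tracking each stored value. | |
| */ | |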
| 1896 static void asm_tbar(ASMState *as, IRIns *ir) | |
| 1897 { | |
| 1898 Reg tab = ra_alloc1(as, ir->op1, RSET_GPR); | |
| 1899 Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, tab)); | |
| 1900 MCLabel l_end = emit_label(as); | |
| 1901 emit_movtomro(as, tmp|REX_GC64, tab, offsetof(GCtab, gclist)); | |
| 1902 emit_setgl(as, tab, gc.grayagain); | |
| 1903 emit_getgl(as, tmp, gc.grayagain); | |
| 1904 emit_i8(as, ~LJ_GC_BLACK); | |
| 1905 emit_rmro(as, XO_ARITHib, XOg_AND, tab, offsetof(GCtab, marked)); | |
| 1906 emit_sjcc(as, CC_Z, l_end); | |
| 1907 emit_i8(as, LJ_GC_BLACK); | |
| 1908 emit_rmro(as, XO_GROUP3b, XOg_TEST, tab, offsetof(GCtab, marked)); | |
| 1909 } | |
| 1910 | |
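| /* Editor's note: the upvalue barrier only fires when both runtime | |
| ** conditions hold: the upvalue is black and the stored value is white. | |
| ** Either test short-circuits to l_end, skipping the lj_gc_barrieruv call. | |
| */ | |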
| 1911 static void asm_obar(ASMState *as, IRIns *ir) | |
| 1912 { | |
| 1913 const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_barrieruv]; | |
| 1914 IRRef args[2]; | |
| 1915 MCLabel l_end; | |
| 1916 Reg obj; | |
| 1917 /* No need for other object barriers (yet). */ | |
| 1918 lj_assertA(IR(ir->op1)->o == IR_UREFC, "bad OBAR type"); | |
| 1919 ra_evictset(as, RSET_SCRATCH); | |
| 1920 l_end = emit_label(as); | |
| 1921 args[0] = ASMREF_TMP1; /* global_State *g */ | |
| 1922 args[1] = ir->op1; /* TValue *tv */ | |
| 1923 asm_gencall(as, ci, args); | |
| 1924 emit_loada(as, ra_releasetmp(as, ASMREF_TMP1), J2G(as->J)); | |
| 1925 obj = IR(ir->op1)->r; | |
| 1926 emit_sjcc(as, CC_Z, l_end); | |
| 1927 emit_i8(as, LJ_GC_WHITES); | |
| 1928 if (irref_isk(ir->op2)) { | |
| 1929 GCobj *vp = ir_kgc(IR(ir->op2)); | |
| 1930 emit_rma(as, XO_GROUP3b, XOg_TEST, &vp->gch.marked); | |
| 1931 } else { | |
| 1932 Reg val = ra_alloc1(as, ir->op2, rset_exclude(RSET_SCRATCH&RSET_GPR, obj)); | |
| 1933 emit_rmro(as, XO_GROUP3b, XOg_TEST, val, (int32_t)offsetof(GChead, marked)); | |
| 1934 } | |
| 1935 emit_sjcc(as, CC_Z, l_end); | |
| 1936 emit_i8(as, LJ_GC_BLACK); | |
| 1937 emit_rmro(as, XO_GROUP3b, XOg_TEST, obj, | |
| 1938 (int32_t)offsetof(GCupval, marked)-(int32_t)offsetof(GCupval, tv)); | |
| 1939 } | |
| 1940 | |
| 1941 /* -- FP/int arithmetic and logic operations ------------------------------ */ | |
| 1942 | |
| 1943 /* Load reference onto x87 stack. Force a spill to memory if needed. */ | |
| 1944 static void asm_x87load(ASMState *as, IRRef ref) | |
| 1945 { | |
| 1946 IRIns *ir = IR(ref); | |
| 1947 if (ir->o == IR_KNUM) { | |
| 1948 cTValue *tv = ir_knum(ir); | |
| 1949 if (tvispzero(tv)) /* Use fldz only for +0. */ | |
| 1950 emit_x87op(as, XI_FLDZ); | |
| 1951 else if (tvispone(tv)) | |
| 1952 emit_x87op(as, XI_FLD1); | |
| 1953 else | |
| 1954 emit_rma(as, XO_FLDq, XOg_FLDq, tv); | |
| 1955 } else if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT && !ra_used(ir) && | |
| 1956 !irref_isk(ir->op1) && mayfuse(as, ir->op1)) { | |
| 1957 IRIns *iri = IR(ir->op1); | |
| 1958 emit_rmro(as, XO_FILDd, XOg_FILDd, RID_ESP, ra_spill(as, iri)); | |
| 1959 } else { | |
| 1960 emit_mrm(as, XO_FLDq, XOg_FLDq, asm_fuseload(as, ref, RSET_EMPTY)); | |
| 1961 } | |
| 1962 } | |
| 1963 | |
| 1964 static void asm_fpmath(ASMState *as, IRIns *ir) | |
| 1965 { | |
| 1966 IRFPMathOp fpm = (IRFPMathOp)ir->op2; | |
| 1967 if (fpm == IRFPM_SQRT) { | |
| 1968 Reg dest = ra_dest(as, ir, RSET_FPR); | |
| 1969 Reg left = asm_fuseload(as, ir->op1, RSET_FPR); | |
| 1970 emit_mrm(as, XO_SQRTSD, dest, left); | |
| 1971 } else if (fpm <= IRFPM_TRUNC) { | |
| 1972 if (as->flags & JIT_F_SSE4_1) { /* SSE4.1 has a rounding instruction. */ | |
| 1973 Reg dest = ra_dest(as, ir, RSET_FPR); | |
| 1974 Reg left = asm_fuseload(as, ir->op1, RSET_FPR); | |
| 1975 /* ROUNDSD has a 4-byte opcode which doesn't fit in x86Op. | |
| 1976 ** Let's pretend it's a 3-byte opcode, and compensate afterwards. | |
| 1977 ** This is atrocious, but the alternatives are much worse. | |
| 1978 */ | |
| 1979 /* Round down/up/trunc == 1001/1010/1011. */ | |
| 1980 emit_i8(as, 0x09 + fpm); | |
| 1981 emit_mrm(as, XO_ROUNDSD, dest, left); | |
| 1982 if (LJ_64 && as->mcp[1] != (MCode)(XO_ROUNDSD >> 16)) { | |
| 1983 as->mcp[0] = as->mcp[1]; as->mcp[1] = 0x0f; /* Swap 0F and REX. */ | |
| 1984 } | |
| 1985 *--as->mcp = 0x66; /* 1st byte of ROUNDSD opcode. */ | |
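| /* Editor's note: the full encoding is 66 0F 3A 0B /r ib. emit_mrm emits | |
| ** the fake 3-byte opcode 0F 3A 0B (with any REX byte misplaced after the | |
| ** 0F), so the REX/0F swap above plus the manually prepended 0x66 | |
| ** reconstruct the correct byte order 66 [REX] 0F 3A 0B. | |
| */ | |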
| 1986 } else { /* Call helper functions for SSE2 variant. */ | |
| 1987 /* The modified regs must match the *.dasc implementation. */ | |
| 1988 RegSet drop = RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX); | |
| 1989 if (ra_hasreg(ir->r)) | |
| 1990 rset_clear(drop, ir->r); /* Dest reg handled below. */ | |
| 1991 ra_evictset(as, drop); | |
| 1992 ra_destreg(as, ir, RID_XMM0); | |
| 1993 emit_call(as, fpm == IRFPM_FLOOR ? lj_vm_floor_sse : | |
| 1994 fpm == IRFPM_CEIL ? lj_vm_ceil_sse : lj_vm_trunc_sse); | |
| 1995 ra_left(as, RID_XMM0, ir->op1); | |
| 1996 } | |
| 1997 } else { | |
| 1998 asm_callid(as, ir, IRCALL_lj_vm_floor + fpm); | |
| 1999 } | |
| 2000 } | |
| 2001 | |
| 2002 static void asm_ldexp(ASMState *as, IRIns *ir) | |
| 2003 { | |
| 2004 int32_t ofs = sps_scale(ir->s); /* Use spill slot or temp slots. */ | |
| 2005 Reg dest = ir->r; | |
| 2006 if (ra_hasreg(dest)) { | |
| 2007 ra_free(as, dest); | |
| 2008 ra_modified(as, dest); | |
| 2009 emit_rmro(as, XO_MOVSD, dest, RID_ESP, ofs); | |
| 2010 } | |
| 2011 emit_rmro(as, XO_FSTPq, XOg_FSTPq, RID_ESP, ofs); | |
| 2012 emit_x87op(as, XI_FPOP1); | |
| 2013 emit_x87op(as, XI_FSCALE); | |
| 2014 asm_x87load(as, ir->op1); | |
| 2015 asm_x87load(as, ir->op2); | |
| 2016 } | |
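| /* Editor's note: code is emitted backwards, so the runtime order of the | |
| ** ldexp sequence above is: fld op2 (exponent); fld op1; fscale, which | |
| ** computes st0 = st0 * 2^trunc(st1); fstp st1 (drop the exponent); | |
| ** fstp qword [esp+ofs]; then an optional movsd into the result register. | |
| */ | |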
| 2017 | |
| 2018 static int asm_swapops(ASMState *as, IRIns *ir) | |
| 2019 { | |
| 2020 IRIns *irl = IR(ir->op1); | |
| 2021 IRIns *irr = IR(ir->op2); | |
| 2022 lj_assertA(ra_noreg(irr->r), "bad usage"); | |
| 2023 if (!irm_iscomm(lj_ir_mode[ir->o])) | |
| 2024 return 0; /* Can't swap non-commutative operations. */ | |
| 2025 if (irref_isk(ir->op2)) | |
| 2026 return 0; /* Don't swap constants to the left. */ | |
| 2027 if (ra_hasreg(irl->r)) | |
| 2028 return 1; /* Swap if left already has a register. */ | |
| 2029 if (ra_samehint(ir->r, irr->r)) | |
| 2030 return 1; /* Swap if dest and right have matching hints. */ | |
| 2031 if (as->curins > as->loopref) { /* In variant part? */ | |
| 2032 if (ir->op2 < as->loopref && !irt_isphi(irr->t)) | |
| 2033 return 0; /* Keep invariants on the right. */ | |
| 2034 if (ir->op1 < as->loopref && !irt_isphi(irl->t)) | |
| 2035 return 1; /* Swap invariants to the right. */ | |
| 2036 } | |
| 2037 if (opisfusableload(irl->o)) | |
| 2038 return 1; /* Swap fusable loads to the right. */ | |
| 2039 return 0; /* Otherwise don't swap. */ | |
| 2040 } | |
| 2041 | |
| 2042 static void asm_fparith(ASMState *as, IRIns *ir, x86Op xo) | |
| 2043 { | |
| 2044 IRRef lref = ir->op1; | |
| 2045 IRRef rref = ir->op2; | |
| 2046 RegSet allow = RSET_FPR; | |
| 2047 Reg dest; | |
| 2048 Reg right = IR(rref)->r; | |
| 2049 if (ra_hasreg(right)) { | |
| 2050 rset_clear(allow, right); | |
| 2051 ra_noweak(as, right); | |
| 2052 } | |
| 2053 dest = ra_dest(as, ir, allow); | |
| 2054 if (lref == rref) { | |
| 2055 right = dest; | |
| 2056 } else if (ra_noreg(right)) { | |
| 2057 if (asm_swapops(as, ir)) { | |
| 2058 IRRef tmp = lref; lref = rref; rref = tmp; | |
| 2059 } | |
| 2060 right = asm_fuseload(as, rref, rset_clear(allow, dest)); | |
| 2061 } | |
| 2062 emit_mrm(as, xo, dest, right); | |
| 2063 ra_left(as, dest, lref); | |
| 2064 } | |
| 2065 | |
| 2066 static void asm_intarith(ASMState *as, IRIns *ir, x86Arith xa) | |
| 2067 { | |
| 2068 IRRef lref = ir->op1; | |
| 2069 IRRef rref = ir->op2; | |
| 2070 RegSet allow = RSET_GPR; | |
| 2071 Reg dest, right; | |
| 2072 int32_t k = 0; | |
| 2073 if (as->flagmcp == as->mcp) { /* Drop test r,r instruction. */ | |
| 2074 MCode *p = as->mcp + ((LJ_64 && *as->mcp < XI_TESTb) ? 3 : 2); | |
| 2075 MCode *q = p[0] == 0x0f ? p+1 : p; | |
| 2076 if ((*q & 15) < 14) { | |
| 2077 if ((*q & 15) >= 12) *q -= 4; /* L <-> S, NL <-> NS */ | |
| 2078 as->flagmcp = NULL; | |
| 2079 as->mcp = p; | |
| 2080 } /* else: cannot transform LE/NLE to cc without use of OF. */ | |
| 2081 } | |
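| /* Editor's note: as->flagmcp marks a "test r,r" just emitted for a | |
| ** comparison against zero. The arithmetic op assembled below sets the | |
| ** same flags itself, so the test can be dropped; conditions L/NL are | |
| ** remapped to S/NS (subtracting 4 from the condition-code nibble), while | |
| ** LE/NLE would need OF and therefore keep the test. | |
| */ | |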
| 2082 right = IR(rref)->r; | |
| 2083 if (ra_hasreg(right)) { | |
| 2084 rset_clear(allow, right); | |
| 2085 ra_noweak(as, right); | |
| 2086 } | |
| 2087 dest = ra_dest(as, ir, allow); | |
| 2088 if (lref == rref) { | |
| 2089 right = dest; | |
| 2090 } else if (ra_noreg(right) && !asm_isk32(as, rref, &k)) { | |
| 2091 if (asm_swapops(as, ir)) { | |
| 2092 IRRef tmp = lref; lref = rref; rref = tmp; | |
| 2093 } | |
| 2094 right = asm_fuseloadm(as, rref, rset_clear(allow, dest), irt_is64(ir->t)); | |
| 2095 } | |
| 2096 if (irt_isguard(ir->t)) /* For IR_ADDOV etc. */ | |
| 2097 asm_guardcc(as, CC_O); | |
| 2098 if (xa != XOg_X_IMUL) { | |
| 2099 if (ra_hasreg(right)) | |
| 2100 emit_mrm(as, XO_ARITH(xa), REX_64IR(ir, dest), right); | |
| 2101 else | |
| 2102 emit_gri(as, XG_ARITHi(xa), REX_64IR(ir, dest), k); | |
| 2103 } else if (ra_hasreg(right)) { /* IMUL r, mrm. */ | |
| 2104 emit_mrm(as, XO_IMUL, REX_64IR(ir, dest), right); | |
| 2105 } else { /* IMUL r, r, k. */ | |
| 2106 /* NYI: use lea/shl/add/sub (FOLD only does 2^k) depending on CPU. */ | |
| 2107 Reg left = asm_fuseloadm(as, lref, RSET_GPR, irt_is64(ir->t)); | |
| 2108 x86Op xo; | |
| 2109 if (checki8(k)) { emit_i8(as, k); xo = XO_IMULi8; | |
| 2110 } else { emit_i32(as, k); xo = XO_IMULi; } | |
| 2111 emit_mrm(as, xo, REX_64IR(ir, dest), left); | |
| 2112 return; | |
| 2113 } | |
| 2114 ra_left(as, dest, lref); | |
| 2115 } | |
| 2116 | |
| 2117 /* LEA is really a 4-operand ADD with an independent destination register, | |
| 2118 ** up to two source registers and an immediate. One register can be scaled | |
| 2119 ** by 1, 2, 4 or 8. This can be used to avoid moves or to fuse several | |
| 2120 ** instructions. | |
| 2121 ** | |
| 2122 ** Currently only a few common cases are supported: | |
| 2123 ** - 3-operand ADD: y = a+b; y = a+k with a and b already allocated | |
| 2124 ** - Left ADD fusion: y = (a+b)+k; y = (a+k)+b | |
| 2125 ** - Right ADD fusion: y = a+(b+k) | |
| 2126 ** The omitted variants have already been reduced by FOLD. | |
| 2127 ** | |
| 2128 ** There are more fusion opportunities, like gathering shifts or joining | |
| 2129 ** common references. But these are probably not worth the trouble, since | |
| 2130 ** array indexing is not decomposed and already makes use of all fields | |
| 2131 ** of the ModRM operand. | |
| 2132 */ | |
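| /* Editor's illustration of the supported shapes (hypothetical registers | |
| ** a, b, destination y, constant k): | |
| **   y = a+b     -> lea y, [a+b]       y = a+k     -> lea y, [a+k] | |
| **   y = (a+k)+b -> lea y, [a+b+k]     y = a+(b+k) -> lea y, [a+b+k] | |
| */ | |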
| 2133 static int asm_lea(ASMState *as, IRIns *ir) | |
| 2134 { | |
| 2135 IRIns *irl = IR(ir->op1); | |
| 2136 IRIns *irr = IR(ir->op2); | |
| 2137 RegSet allow = RSET_GPR; | |
| 2138 Reg dest; | |
| 2139 as->mrm.base = as->mrm.idx = RID_NONE; | |
| 2140 as->mrm.scale = XM_SCALE1; | |
| 2141 as->mrm.ofs = 0; | |
| 2142 if (ra_hasreg(irl->r)) { | |
| 2143 rset_clear(allow, irl->r); | |
| 2144 ra_noweak(as, irl->r); | |
| 2145 as->mrm.base = irl->r; | |
| 2146 if (irref_isk(ir->op2) || ra_hasreg(irr->r)) { | |
| 2147 /* The PHI renaming logic does a better job in some cases. */ | |
| 2148 if (ra_hasreg(ir->r) && | |
| 2149 ((irt_isphi(irl->t) && as->phireg[ir->r] == ir->op1) || | |
| 2150 (irt_isphi(irr->t) && as->phireg[ir->r] == ir->op2))) | |
| 2151 return 0; | |
| 2152 if (irref_isk(ir->op2)) { | |
| 2153 as->mrm.ofs = irr->i; | |
| 2154 } else { | |
| 2155 rset_clear(allow, irr->r); | |
| 2156 ra_noweak(as, irr->r); | |
| 2157 as->mrm.idx = irr->r; | |
| 2158 } | |
| 2159 } else if (irr->o == IR_ADD && mayfuse(as, ir->op2) && | |
| 2160 irref_isk(irr->op2)) { | |
| 2161 Reg idx = ra_alloc1(as, irr->op1, allow); | |
| 2162 rset_clear(allow, idx); | |
| 2163 as->mrm.idx = (uint8_t)idx; | |
| 2164 as->mrm.ofs = IR(irr->op2)->i; | |
| 2165 } else { | |
| 2166 return 0; | |
| 2167 } | |
| 2168 } else if (ir->op1 != ir->op2 && irl->o == IR_ADD && mayfuse(as, ir->op1) && | |
| 2169 (irref_isk(ir->op2) || irref_isk(irl->op2))) { | |
| 2170 Reg idx, base = ra_alloc1(as, irl->op1, allow); | |
| 2171 rset_clear(allow, base); | |
| 2172 as->mrm.base = (uint8_t)base; | |
| 2173 if (irref_isk(ir->op2)) { | |
| 2174 as->mrm.ofs = irr->i; | |
| 2175 idx = ra_alloc1(as, irl->op2, allow); | |
| 2176 } else { | |
| 2177 as->mrm.ofs = IR(irl->op2)->i; | |
| 2178 idx = ra_alloc1(as, ir->op2, allow); | |
| 2179 } | |
| 2180 rset_clear(allow, idx); | |
| 2181 as->mrm.idx = (uint8_t)idx; | |
| 2182 } else { | |
| 2183 return 0; | |
| 2184 } | |
| 2185 dest = ra_dest(as, ir, allow); | |
| 2186 emit_mrm(as, XO_LEA, dest, RID_MRM); | |
| 2187 return 1; /* Success. */ | |
| 2188 } | |
| 2189 | |
| 2190 static void asm_add(ASMState *as, IRIns *ir) | |
| 2191 { | |
| 2192 if (irt_isnum(ir->t)) | |
| 2193 asm_fparith(as, ir, XO_ADDSD); | |
| 2194 else if (as->flagmcp == as->mcp || irt_is64(ir->t) || !asm_lea(as, ir)) | |
| 2195 asm_intarith(as, ir, XOg_ADD); | |
| 2196 } | |
| 2197 | |
| 2198 static void asm_sub(ASMState *as, IRIns *ir) | |
| 2199 { | |
| 2200 if (irt_isnum(ir->t)) | |
| 2201 asm_fparith(as, ir, XO_SUBSD); | |
| 2202 else /* Note: no need for LEA trick here. i-k is encoded as i+(-k). */ | |
| 2203 asm_intarith(as, ir, XOg_SUB); | |
| 2204 } | |
| 2205 | |
| 2206 static void asm_mul(ASMState *as, IRIns *ir) | |
| 2207 { | |
| 2208 if (irt_isnum(ir->t)) | |
| 2209 asm_fparith(as, ir, XO_MULSD); | |
| 2210 else | |
| 2211 asm_intarith(as, ir, XOg_X_IMUL); | |
| 2212 } | |
| 2213 | |
| 2214 #define asm_fpdiv(as, ir) asm_fparith(as, ir, XO_DIVSD) | |
| 2215 | |
| 2216 static void asm_neg_not(ASMState *as, IRIns *ir, x86Group3 xg) | |
| 2217 { | |
| 2218 Reg dest = ra_dest(as, ir, RSET_GPR); | |
| 2219 emit_rr(as, XO_GROUP3, REX_64IR(ir, xg), dest); | |
| 2220 ra_left(as, dest, ir->op1); | |
| 2221 } | |
| 2222 | |
| 2223 static void asm_neg(ASMState *as, IRIns *ir) | |
| 2224 { | |
| 2225 if (irt_isnum(ir->t)) | |
| 2226 asm_fparith(as, ir, XO_XORPS); | |
| 2227 else | |
| 2228 asm_neg_not(as, ir, XOg_NEG); | |
| 2229 } | |
| 2230 | |
| 2231 #define asm_abs(as, ir) asm_fparith(as, ir, XO_ANDPS) | |
| 2232 | |
| 2233 static void asm_intmin_max(ASMState *as, IRIns *ir, int cc) | |
| 2234 { | |
| 2235 Reg right, dest = ra_dest(as, ir, RSET_GPR); | |
| 2236 IRRef lref = ir->op1, rref = ir->op2; | |
| 2237 if (irref_isk(rref)) { lref = rref; rref = ir->op1; } | |
| 2238 right = ra_alloc1(as, rref, rset_exclude(RSET_GPR, dest)); | |
| 2239 emit_rr(as, XO_CMOV + (cc<<24), REX_64IR(ir, dest), right); | |
| 2240 emit_rr(as, XO_CMP, REX_64IR(ir, dest), right); | |
| 2241 ra_left(as, dest, lref); | |
| 2242 } | |
| 2243 | |
| 2244 static void asm_min(ASMState *as, IRIns *ir) | |
| 2245 { | |
| 2246 if (irt_isnum(ir->t)) | |
| 2247 asm_fparith(as, ir, XO_MINSD); | |
| 2248 else | |
| 2249 asm_intmin_max(as, ir, CC_G); | |
| 2250 } | |
| 2251 | |
| 2252 static void asm_max(ASMState *as, IRIns *ir) | |
| 2253 { | |
| 2254 if (irt_isnum(ir->t)) | |
| 2255 asm_fparith(as, ir, XO_MAXSD); | |
| 2256 else | |
| 2257 asm_intmin_max(as, ir, CC_L); | |
| 2258 } | |
| 2259 | |
| 2260 /* Note: don't use LEA for overflow-checking arithmetic! */ | |
| 2261 #define asm_addov(as, ir) asm_intarith(as, ir, XOg_ADD) | |
| 2262 #define asm_subov(as, ir) asm_intarith(as, ir, XOg_SUB) | |
| 2263 #define asm_mulov(as, ir) asm_intarith(as, ir, XOg_X_IMUL) | |
| 2264 | |
| 2265 #define asm_bnot(as, ir) asm_neg_not(as, ir, XOg_NOT) | |
| 2266 | |
| 2267 static void asm_bswap(ASMState *as, IRIns *ir) | |
| 2268 { | |
| 2269 Reg dest = ra_dest(as, ir, RSET_GPR); | |
| 2270 as->mcp = emit_op(XO_BSWAP + ((dest&7) << 24), | |
| 2271 REX_64IR(ir, 0), dest, 0, as->mcp, 1); | |
| 2272 ra_left(as, dest, ir->op1); | |
| 2273 } | |
| 2274 | |
| 2275 #define asm_band(as, ir) asm_intarith(as, ir, XOg_AND) | |
| 2276 #define asm_bor(as, ir) asm_intarith(as, ir, XOg_OR) | |
| 2277 #define asm_bxor(as, ir) asm_intarith(as, ir, XOg_XOR) | |
| 2278 | |
| 2279 static void asm_bitshift(ASMState *as, IRIns *ir, x86Shift xs, x86Op xv) | |
| 2280 { | |
| 2281 IRRef rref = ir->op2; | |
| 2282 IRIns *irr = IR(rref); | |
| 2283 Reg dest; | |
| 2284 if (irref_isk(rref)) { /* Constant shifts. */ | |
| 2285 int shift; | |
| 2286 dest = ra_dest(as, ir, RSET_GPR); | |
| 2287 shift = irr->i & (irt_is64(ir->t) ? 63 : 31); | |
| 2288 if (!xv && shift && (as->flags & JIT_F_BMI2)) { | |
| 2289 Reg left = asm_fuseloadm(as, ir->op1, RSET_GPR, irt_is64(ir->t)); | |
| 2290 if (left != dest) { /* BMI2 rotate right by constant. */ | |
| 2291 emit_i8(as, xs == XOg_ROL ? -shift : shift); | |
| 2292 emit_mrm(as, VEX_64IR(ir, XV_RORX), dest, left); | |
| 2293 return; | |
| 2294 } | |
| 2295 } | |
| 2296 switch (shift) { | |
| 2297 case 0: break; | |
| 2298 case 1: emit_rr(as, XO_SHIFT1, REX_64IR(ir, xs), dest); break; | |
| 2299 default: emit_shifti(as, REX_64IR(ir, xs), dest, shift); break; | |
| 2300 } | |
| 2301 } else if ((as->flags & JIT_F_BMI2) && xv) { /* BMI2 variable shifts. */ | |
| 2302 Reg left, right; | |
| 2303 dest = ra_dest(as, ir, RSET_GPR); | |
| 2304 right = ra_alloc1(as, rref, RSET_GPR); | |
| 2305 left = asm_fuseloadm(as, ir->op1, rset_exclude(RSET_GPR, right), | |
| 2306 irt_is64(ir->t)); | |
| 2307 emit_mrm(as, VEX_64IR(ir, xv) ^ (right << 19), dest, left); | |
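| /* Editor's note: SHLX/SHRX/SARX take the shift count in VEX.vvvv. The | |
| ** XOR folds `right` into the (inverted) vvvv bits, which this packed | |
| ** opcode layout appears to keep at bits 19..22 -- hence `right << 19`. | |
| */ | |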
| 2308 return; | |
| 2309 } else { /* Variable shifts implicitly use register cl (i.e. ecx). */ | |
| 2310 Reg right; | |
| 2311 dest = ra_dest(as, ir, rset_exclude(RSET_GPR, RID_ECX)); | |
| 2312 if (dest == RID_ECX) { | |
| 2313 dest = ra_scratch(as, rset_exclude(RSET_GPR, RID_ECX)); | |
| 2314 emit_rr(as, XO_MOV, REX_64IR(ir, RID_ECX), dest); | |
| 2315 } | |
| 2316 right = irr->r; | |
| 2317 if (ra_noreg(right)) | |
| 2318 right = ra_allocref(as, rref, RID2RSET(RID_ECX)); | |
| 2319 else if (right != RID_ECX) | |
| 2320 ra_scratch(as, RID2RSET(RID_ECX)); | |
| 2321 emit_rr(as, XO_SHIFTcl, REX_64IR(ir, xs), dest); | |
| 2322 ra_noweak(as, right); | |
| 2323 if (right != RID_ECX) | |
| 2324 emit_rr(as, XO_MOV, RID_ECX, right); | |
| 2325 } | |
| 2326 ra_left(as, dest, ir->op1); | |
| 2327 /* | |
| 2328 ** Note: avoid using the flags resulting from a shift or rotate! | |
| 2329 ** All of them cause a partial flag stall, except for r,1 shifts | |
| 2330 ** (but not rotates). And a shift count of 0 leaves the flags unmodified. | |
| 2331 */ | |
| 2332 } | |
| 2333 | |
| 2334 #define asm_bshl(as, ir) asm_bitshift(as, ir, XOg_SHL, XV_SHLX) | |
| 2335 #define asm_bshr(as, ir) asm_bitshift(as, ir, XOg_SHR, XV_SHRX) | |
| 2336 #define asm_bsar(as, ir) asm_bitshift(as, ir, XOg_SAR, XV_SARX) | |
| 2337 #define asm_brol(as, ir) asm_bitshift(as, ir, XOg_ROL, 0) | |
| 2338 #define asm_bror(as, ir) asm_bitshift(as, ir, XOg_ROR, 0) | |
| 2339 | |
| 2340 /* -- Comparisons --------------------------------------------------------- */ | |
| 2341 | |
| 2342 /* Virtual flags for unordered FP comparisons. */ | |
| 2343 #define VCC_U 0x1000 /* Unordered. */ | |
| 2344 #define VCC_P 0x2000 /* Needs extra CC_P branch. */ | |
| 2345 #define VCC_S 0x4000 /* Swap avoids CC_P branch. */ | |
| 2346 #define VCC_PS (VCC_P|VCC_S) | |
| 2347 | |
| 2348 /* Map of comparisons to flags. ORDER IR. */ | |
| 2349 #define COMPFLAGS(ci, cin, cu, cf) ((ci)+((cu)<<4)+((cin)<<8)+(cf)) | |
| 2350 static const uint16_t asm_compmap[IR_ABC+1] = { | |
| 2351 /* signed non-eq unsigned flags */ | |
| 2352 /* LT */ COMPFLAGS(CC_GE, CC_G, CC_AE, VCC_PS), | |
| 2353 /* GE */ COMPFLAGS(CC_L, CC_L, CC_B, 0), | |
| 2354 /* LE */ COMPFLAGS(CC_G, CC_G, CC_A, VCC_PS), | |
| 2355 /* GT */ COMPFLAGS(CC_LE, CC_L, CC_BE, 0), | |
| 2356 /* ULT */ COMPFLAGS(CC_AE, CC_A, CC_AE, VCC_U), | |
| 2357 /* UGE */ COMPFLAGS(CC_B, CC_B, CC_B, VCC_U|VCC_PS), | |
| 2358 /* ULE */ COMPFLAGS(CC_A, CC_A, CC_A, VCC_U), | |
| 2359 /* UGT */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS), | |
| 2360 /* EQ */ COMPFLAGS(CC_NE, CC_NE, CC_NE, VCC_P), | |
| 2361 /* NE */ COMPFLAGS(CC_E, CC_E, CC_E, VCC_U|VCC_P), | |
| 2362 /* ABC */ COMPFLAGS(CC_BE, CC_B, CC_BE, VCC_U|VCC_PS) /* Same as UGT. */ | |
| 2363 }; | |
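| /* Editor's decode of the packed entries (all conditions are negated, | |
| ** i.e. the guard jumps to the exit when the comparison fails): bits 0-3 | |
| ** hold the signed integer condition, bits 4-7 the unsigned/FP condition | |
| ** (used as cc >> 4, since UCOMISD sets CF/ZF like an unsigned compare), | |
| ** bits 8-11 the hiword condition for split 64 bit compares (cc >> 8), | |
| ** and the VCC_* flags occupy bit 12 and up. | |
| */ | |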
| 2364 | |
| 2365 /* FP and integer comparisons. */ | |
| 2366 static void asm_comp(ASMState *as, IRIns *ir) | |
| 2367 { | |
| 2368 uint32_t cc = asm_compmap[ir->o]; | |
| 2369 if (irt_isnum(ir->t)) { | |
| 2370 IRRef lref = ir->op1; | |
| 2371 IRRef rref = ir->op2; | |
| 2372 Reg left, right; | |
| 2373 MCLabel l_around; | |
| 2374 /* | |
| 2375 ** An extra CC_P branch is required to preserve ordered/unordered | |
| 2376 ** semantics for FP comparisons. This can be avoided by swapping | |
| 2377 ** the operands and inverting the condition (except for EQ and UNE). | |
| 2378 ** So always try to swap if possible. | |
| 2379 ** | |
| 2380 ** Another option would be to swap operands to achieve better memory | |
| 2381 ** operand fusion. But it's unlikely that this outweighs the cost | |
| 2382 ** of the extra branches. | |
| 2383 */ | |
| 2384 if (cc & VCC_S) { /* Swap? */ | |
| 2385 IRRef tmp = lref; lref = rref; rref = tmp; | |
| 2386 cc ^= (VCC_PS|(5<<4)); /* A <-> B, AE <-> BE, PS <-> none */ | |
| 2387 } | |
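| /* Editor's note: XOR-ing the condition nibble used for FP guards with 5 | |
| ** swaps the x86 condition codes A(7) <-> B(2) and AE(3) <-> BE(6), | |
| ** matching the operand swap, while VCC_PS toggles the extra-branch and | |
| ** swap flags. | |
| */ | |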
| 2388 left = ra_alloc1(as, lref, RSET_FPR); | |
| 2389 l_around = emit_label(as); | |
| 2390 asm_guardcc(as, cc >> 4); | |
| 2391 if (cc & VCC_P) { /* Extra CC_P branch required? */ | |
| 2392 if (!(cc & VCC_U)) { | |
| 2393 asm_guardcc(as, CC_P); /* Branch to exit for ordered comparisons. */ | |
| 2394 } else if (l_around != as->invmcp) { | |
| 2395 emit_sjcc(as, CC_P, l_around); /* Branch around for unordered. */ | |
| 2396 } else { | |
| 2397 /* Patched to mcloop by asm_loop_fixup. */ | |
| 2398 as->loopinv = 2; | |
| 2399 if (as->realign) | |
| 2400 emit_sjcc(as, CC_P, as->mcp); | |
| 2401 else | |
| 2402 emit_jcc(as, CC_P, as->mcp); | |
| 2403 } | |
| 2404 } | |
| 2405 right = asm_fuseload(as, rref, rset_exclude(RSET_FPR, left)); | |
| 2406 emit_mrm(as, XO_UCOMISD, left, right); | |
| 2407 } else { | |
| 2408 IRRef lref = ir->op1, rref = ir->op2; | |
| 2409 IROp leftop = (IROp)(IR(lref)->o); | |
| 2410 Reg r64 = REX_64IR(ir, 0); | |
| 2411 int32_t imm = 0; | |
| 2412 lj_assertA(irt_is64(ir->t) || irt_isint(ir->t) || | |
| 2413 irt_isu32(ir->t) || irt_isaddr(ir->t) || irt_isu8(ir->t), | |
| 2414 "bad comparison data type %d", irt_type(ir->t)); | |
| 2415 /* Swap constants (only for ABC) and fusable loads to the right. */ | |
| 2416 if (irref_isk(lref) || (!irref_isk(rref) && opisfusableload(leftop))) { | |
| 2417 if ((cc & 0xc) == 0xc) cc ^= 0x53; /* L <-> G, LE <-> GE */ | |
| 2418 else if ((cc & 0xa) == 0x2) cc ^= 0x55; /* A <-> B, AE <-> BE */ | |
| 2419 lref = ir->op2; rref = ir->op1; | |
| 2420 } | |
| 2421 if (asm_isk32(as, rref, &imm)) { | |
| 2422 IRIns *irl = IR(lref); | |
| 2423 /* Check whether we can use the test instruction. Not for unsigned, since CF=0. */ | |
| 2424 int usetest = (imm == 0 && (cc & 0xa) != 0x2); | |
| 2425 if (usetest && irl->o == IR_BAND && irl+1 == ir && !ra_used(irl)) { | |
| 2426 /* Combine comp(BAND(ref, r/imm), 0) into test mrm, r/imm. */ | |
| 2427 Reg right, left = RID_NONE; | |
| 2428 RegSet allow = RSET_GPR; | |
| 2429 if (!asm_isk32(as, irl->op2, &imm)) { | |
| 2430 left = ra_alloc1(as, irl->op2, allow); | |
| 2431 rset_clear(allow, left); | |
| 2432 } else { /* Try to fuse IRT_I8/IRT_U8 loads, too. See below. */ | |
| 2433 IRIns *irll = IR(irl->op1); | |
| 2434 if (opisfusableload((IROp)irll->o) && | |
| 2435 (irt_isi8(irll->t) || irt_isu8(irll->t))) { | |
| 2436 IRType1 origt = irll->t; /* Temporarily flip types. */ | |
| 2437 irll->t.irt = (irll->t.irt & ~IRT_TYPE) | IRT_INT; | |
| 2438 as->curins--; /* Skip to BAND to avoid failing in noconflict(). */ | |
| 2439 right = asm_fuseload(as, irl->op1, RSET_GPR); | |
| 2440 as->curins++; | |
| 2441 irll->t = origt; | |
| 2442 if (right != RID_MRM) goto test_nofuse; | |
| 2443 /* Fusion succeeded, emit test byte mrm, imm8. */ | |
| 2444 asm_guardcc(as, cc); | |
| 2445 emit_i8(as, (imm & 0xff)); | |
| 2446 emit_mrm(as, XO_GROUP3b, XOg_TEST, RID_MRM); | |
| 2447 return; | |
| 2448 } | |
| 2449 } | |
| 2450 as->curins--; /* Skip to BAND to avoid failing in noconflict(). */ | |
| 2451 right = asm_fuseloadm(as, irl->op1, allow, r64); | |
| 2452 as->curins++; /* Undo the above. */ | |
| 2453 test_nofuse: | |
| 2454 asm_guardcc(as, cc); | |
| 2455 if (ra_noreg(left)) { | |
| 2456 emit_i32(as, imm); | |
| 2457 emit_mrm(as, XO_GROUP3, r64 + XOg_TEST, right); | |
| 2458 } else { | |
| 2459 emit_mrm(as, XO_TEST, r64 + left, right); | |
| 2460 } | |
| 2461 } else { | |
| 2462 Reg left; | |
| 2463 if (opisfusableload((IROp)irl->o) && | |
| 2464 ((irt_isu8(irl->t) && checku8(imm)) || | |
| 2465 ((irt_isi8(irl->t) || irt_isi16(irl->t)) && checki8(imm)) || | |
| 2466 (irt_isu16(irl->t) && checku16(imm) && checki8((int16_t)imm)))) { | |
| 2467 /* Only the IRT_INT case is fused by asm_fuseload. | |
| 2468 ** The IRT_I8/IRT_U8 loads and some IRT_I16/IRT_U16 loads | |
| 2469 ** are handled here. | |
| 2470 ** Note that cmp word [mem], imm16 should not be generated, | |
| 2471 ** since it has a length-changing prefix. Compares of a word | |
| 2472 ** against a sign-extended imm8 are ok, however. | |
| 2473 */ | |
| 2474 IRType1 origt = irl->t; /* Temporarily flip types. */ | |
| 2475 irl->t.irt = (irl->t.irt & ~IRT_TYPE) | IRT_INT; | |
| 2476 left = asm_fuseload(as, lref, RSET_GPR); | |
| 2477 irl->t = origt; | |
| 2478 if (left == RID_MRM) { /* Fusion succeeded? */ | |
| 2479 if (irt_isu8(irl->t) || irt_isu16(irl->t)) | |
| 2480 cc >>= 4; /* Need unsigned compare. */ | |
| 2481 asm_guardcc(as, cc); | |
| 2482 emit_i8(as, imm); | |
| 2483 emit_mrm(as, (irt_isi8(origt) || irt_isu8(origt)) ? | |
| 2484 XO_ARITHib : XO_ARITHiw8, r64 + XOg_CMP, RID_MRM); | |
| 2485 return; | |
| 2486 } /* Otherwise handle register case as usual. */ | |
| 2487 } else { | |
| 2488 left = asm_fuseloadm(as, lref, | |
| 2489 irt_isu8(ir->t) ? RSET_GPR8 : RSET_GPR, r64); | |
| 2490 } | |
| 2491 asm_guardcc(as, cc); | |
| 2492 if (usetest && left != RID_MRM) { | |
| 2493 /* Use test r,r instead of cmp r,0. */ | |
| 2494 x86Op xo = XO_TEST; | |
| 2495 if (irt_isu8(ir->t)) { | |
| 2496 lj_assertA(ir->o == IR_EQ || ir->o == IR_NE, "bad usage"); | |
| 2497 xo = XO_TESTb; | |
| 2498 if (!rset_test(RSET_RANGE(RID_EAX, RID_EBX+1), left)) { | |
| 2499 if (LJ_64) { | |
| 2500 left |= FORCE_REX; | |
| 2501 } else { | |
| 2502 emit_i32(as, 0xff); | |
| 2503 emit_mrm(as, XO_GROUP3, XOg_TEST, left); | |
| 2504 return; | |
| 2505 } | |
| 2506 } | |
| 2507 } | |
| 2508 emit_rr(as, xo, r64 + left, left); | |
| 2509 if (irl+1 == ir) /* Referencing previous ins? */ | |
| 2510 as->flagmcp = as->mcp; /* Set flag to drop test r,r if possible. */ | |
| 2511 } else { | |
| 2512 emit_gmrmi(as, XG_ARITHi(XOg_CMP), r64 + left, imm); | |
| 2513 } | |
| 2514 } | |
| 2515 } else { | |
| 2516 Reg left = ra_alloc1(as, lref, RSET_GPR); | |
| 2517 Reg right = asm_fuseloadm(as, rref, rset_exclude(RSET_GPR, left), r64); | |
| 2518 asm_guardcc(as, cc); | |
| 2519 emit_mrm(as, XO_CMP, r64 + left, right); | |
| 2520 } | |
| 2521 } | |
| 2522 } | |
| 2523 | |
| 2524 #define asm_equal(as, ir) asm_comp(as, ir) | |
| 2525 | |
| 2526 #if LJ_32 && LJ_HASFFI | |
| 2527 /* 64 bit integer comparisons in 32 bit mode. */ | |
| 2528 static void asm_comp_int64(ASMState *as, IRIns *ir) | |
| 2529 { | |
| 2530 uint32_t cc = asm_compmap[(ir-1)->o]; | |
| 2531 RegSet allow = RSET_GPR; | |
| 2532 Reg lefthi = RID_NONE, leftlo = RID_NONE; | |
| 2533 Reg righthi = RID_NONE, rightlo = RID_NONE; | |
| 2534 MCLabel l_around; | |
| 2535 x86ModRM mrm; | |
| 2536 | |
| 2537 as->curins--; /* Skip loword ins. Avoids failing in noconflict(), too. */ | |
| 2538 | |
| 2539 /* Allocate/fuse hiword operands. */ | |
| 2540 if (irref_isk(ir->op2)) { | |
| 2541 lefthi = asm_fuseload(as, ir->op1, allow); | |
| 2542 } else { | |
| 2543 lefthi = ra_alloc1(as, ir->op1, allow); | |
| 2544 rset_clear(allow, lefthi); | |
| 2545 righthi = asm_fuseload(as, ir->op2, allow); | |
| 2546 if (righthi == RID_MRM) { | |
| 2547 if (as->mrm.base != RID_NONE) rset_clear(allow, as->mrm.base); | |
| 2548 if (as->mrm.idx != RID_NONE) rset_clear(allow, as->mrm.idx); | |
| 2549 } else { | |
| 2550 rset_clear(allow, righthi); | |
| 2551 } | |
| 2552 } | |
| 2553 mrm = as->mrm; /* Save state for hiword instruction. */ | |
| 2554 | |
| 2555 /* Allocate/fuse loword operands. */ | |
| 2556 if (irref_isk((ir-1)->op2)) { | |
| 2557 leftlo = asm_fuseload(as, (ir-1)->op1, allow); | |
| 2558 } else { | |
| 2559 leftlo = ra_alloc1(as, (ir-1)->op1, allow); | |
| 2560 rset_clear(allow, leftlo); | |
| 2561 rightlo = asm_fuseload(as, (ir-1)->op2, allow); | |
| 2562 } | |
| 2563 | |
| 2564 /* All register allocations must be performed _before_ this point. */ | |
| 2565 l_around = emit_label(as); | |
| 2566 as->invmcp = as->flagmcp = NULL; /* Cannot use these optimizations. */ | |
| 2567 | |
| 2568 /* Loword comparison and branch. */ | |
| 2569 asm_guardcc(as, cc >> 4); /* Always use unsigned compare for loword. */ | |
| 2570 if (ra_noreg(rightlo)) { | |
| 2571 int32_t imm = IR((ir-1)->op2)->i; | |
| 2572 if (imm == 0 && ((cc >> 4) & 0xa) != 0x2 && leftlo != RID_MRM) | |
| 2573 emit_rr(as, XO_TEST, leftlo, leftlo); | |
| 2574 else | |
| 2575 emit_gmrmi(as, XG_ARITHi(XOg_CMP), leftlo, imm); | |
| 2576 } else { | |
| 2577 emit_mrm(as, XO_CMP, leftlo, rightlo); | |
| 2578 } | |
| 2579 | |
| 2580 /* Hiword comparison and branches. */ | |
| 2581 if ((cc & 15) != CC_NE) | |
| 2582 emit_sjcc(as, CC_NE, l_around); /* Hiword unequal: skip loword compare. */ | |
| 2583 if ((cc & 15) != CC_E) | |
| 2584 asm_guardcc(as, cc >> 8); /* Hiword compare without equality check. */ | |
| 2585 as->mrm = mrm; /* Restore state. */ | |
| 2586 if (ra_noreg(righthi)) { | |
| 2587 int32_t imm = IR(ir->op2)->i; | |
| 2588 if (imm == 0 && (cc & 0xa) != 0x2 && lefthi != RID_MRM) | |
| 2589 emit_rr(as, XO_TEST, lefthi, lefthi); | |
| 2590 else | |
| 2591 emit_gmrmi(as, XG_ARITHi(XOg_CMP), lefthi, imm); | |
| 2592 } else { | |
| 2593 emit_mrm(as, XO_CMP, lefthi, righthi); | |
| 2594 } | |
| 2595 } | |
| 2596 #endif | |
| 2597 | |
| 2598 /* -- Split register ops -------------------------------------------------- */ | |
| 2599 | |
| 2600 /* Hiword op of a split 32/32 or 64/64 bit op. Previous op is the loword op. */ | |
| 2601 static void asm_hiop(ASMState *as, IRIns *ir) | |
| 2602 { | |
| 2603 /* HIOP is marked as a store because it needs its own DCE logic. */ | |
| 2604 int uselo = ra_used(ir-1), usehi = ra_used(ir); /* Loword/hiword used? */ | |
| 2605 if (LJ_UNLIKELY(!(as->flags & JIT_F_OPT_DCE))) uselo = usehi = 1; | |
| 2606 #if LJ_32 && LJ_HASFFI | |
| 2607 if ((ir-1)->o == IR_CONV) { /* Conversions to/from 64 bit. */ | |
| 2608 as->curins--; /* Always skip the CONV. */ | |
| 2609 if (usehi || uselo) | |
| 2610 asm_conv64(as, ir); | |
| 2611 return; | |
| 2612 } else if ((ir-1)->o <= IR_NE) { /* 64 bit integer comparisons. ORDER IR. */ | |
| 2613 asm_comp_int64(as, ir); | |
| 2614 return; | |
| 2615 } else if ((ir-1)->o == IR_XSTORE) { | |
| 2616 if ((ir-1)->r != RID_SINK) | |
| 2617 asm_fxstore(as, ir); | |
| 2618 return; | |
| 2619 } | |
| 2620 #endif | |
| 2621 if (!usehi) return; /* Skip unused hiword op for all remaining ops. */ | |
| 2622 switch ((ir-1)->o) { | |
| 2623 #if LJ_32 && LJ_HASFFI | |
| 2624 case IR_ADD: | |
| 2625 as->flagmcp = NULL; | |
| 2626 as->curins--; | |
| 2627 asm_intarith(as, ir, XOg_ADC); | |
| 2628 asm_intarith(as, ir-1, XOg_ADD); | |
| 2629 break; | |
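| /* Editor's note: emitted backwards, so at runtime the loword ADD runs | |
| ** first and the hiword ADC then consumes its carry. The SUB/SBB pair | |
| ** below works the same way. | |
| */ | |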
| 2630 case IR_SUB: | |
| 2631 as->flagmcp = NULL; | |
| 2632 as->curins--; | |
| 2633 asm_intarith(as, ir, XOg_SBB); | |
| 2634 asm_intarith(as, ir-1, XOg_SUB); | |
| 2635 break; | |
| 2636 case IR_NEG: { | |
| 2637 Reg dest = ra_dest(as, ir, RSET_GPR); | |
| 2638 emit_rr(as, XO_GROUP3, XOg_NEG, dest); | |
| 2639 emit_i8(as, 0); | |
| 2640 emit_rr(as, XO_ARITHi8, XOg_ADC, dest); | |
| 2641 ra_left(as, dest, ir->op1); | |
| 2642 as->curins--; | |
| 2643 asm_neg_not(as, ir-1, XOg_NEG); | |
| 2644 break; | |
| 2645 } | |
| 2646 case IR_CNEWI: | |
| 2647 /* Nothing to do here. Handled by CNEWI itself. */ | |
| 2648 break; | |
| 2649 #endif | |
| 2650 case IR_CALLN: case IR_CALLL: case IR_CALLS: case IR_CALLXS: | |
| 2651 if (!uselo) | |
| 2652 ra_allocref(as, ir->op1, RID2RSET(RID_RETLO)); /* Mark lo op as used. */ | |
| 2653 break; | |
| 2654 default: lj_assertA(0, "bad HIOP for op %d", (ir-1)->o); break; | |
| 2655 } | |
| 2656 } | |
| 2657 | |
| 2658 /* -- Profiling ----------------------------------------------------------- */ | |
| 2659 | |
| 2660 static void asm_prof(ASMState *as, IRIns *ir) | |
| 2661 { | |
| 2662 UNUSED(ir); | |
| 2663 asm_guardcc(as, CC_NE); | |
| 2664 emit_i8(as, HOOK_PROFILE); | |
| 2665 emit_rma(as, XO_GROUP3b, XOg_TEST, &J2G(as->J)->hookmask); | |
| 2666 } | |
| 2667 | |
| 2668 /* -- Stack handling ------------------------------------------------------ */ | |
| 2669 | |
| 2670 /* Check Lua stack size for overflow. Use exit handler as fallback. */ | |
| 2671 static void asm_stack_check(ASMState *as, BCReg topslot, | |
| 2672 IRIns *irp, RegSet allow, ExitNo exitno) | |
| 2673 { | |
| 2674 /* Try to get an unused temp. register, otherwise spill/restore eax. */ | |
| 2675 Reg pbase = irp ? irp->r : RID_BASE; | |
| 2676 Reg r = allow ? rset_pickbot(allow) : RID_EAX; | |
| 2677 emit_jcc(as, CC_B, exitstub_addr(as->J, exitno)); | |
| 2678 if (allow == RSET_EMPTY) /* Restore temp. register. */ | |
| 2679 emit_rmro(as, XO_MOV, r|REX_64, RID_ESP, 0); | |
| 2680 else | |
| 2681 ra_modified(as, r); | |
| 2682 emit_gri(as, XG_ARITHi(XOg_CMP), r|REX_GC64, (int32_t)(8*topslot)); | |
| 2683 if (ra_hasreg(pbase) && pbase != r) | |
| 2684 emit_rr(as, XO_ARITH(XOg_SUB), r|REX_GC64, pbase); | |
| 2685 else | |
| 2686 #if LJ_GC64 | |
| 2687 emit_rmro(as, XO_ARITH(XOg_SUB), r|REX_64, RID_DISPATCH, | |
| 2688 (int32_t)dispofs(as, &J2G(as->J)->jit_base)); | |
| 2689 #else | |
| 2690 emit_rmro(as, XO_ARITH(XOg_SUB), r, RID_NONE, | |
| 2691 ptr2addr(&J2G(as->J)->jit_base)); | |
| 2692 #endif | |
| 2693 emit_rmro(as, XO_MOV, r|REX_GC64, r, offsetof(lua_State, maxstack)); | |
| 2694 emit_getgl(as, r, cur_L); | |
| 2695 if (allow == RSET_EMPTY) /* Spill temp. register. */ | |
| 2696 emit_rmro(as, XO_MOVto, r|REX_64, RID_ESP, 0); | |
| 2697 } | |
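| /* Editor's sketch of the emitted sequence in execution order: | |
| **   [mov [esp], r]                   ; spill if no free register | |
| **   mov r, [g->cur_L] | |
| **   mov r, [r + offsetof(lua_State, maxstack)] | |
| **   sub r, BASE                      ; or sub r, [g->jit_base] | |
| **   cmp r, 8*topslot | |
| **   [mov r, [esp]]                   ; restore | |
| **   jb ->exit_stub | |
| */ | |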
| 2698 | |
/* Restore Lua stack from on-trace state. */
static void asm_stack_restore(ASMState *as, SnapShot *snap)
{
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
#if !LJ_FR2 || defined(LUA_USE_ASSERT)
  SnapEntry *flinks = &as->T->snapmap[snap_nextofs(as->T, snap)-1-LJ_FR2];
#endif
  MSize n, nent = snap->nent;
  /* Store the value of all modified slots to the Lua stack. */
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    BCReg s = snap_slot(sn);
    int32_t ofs = 8*((int32_t)s-1-LJ_FR2);
    IRRef ref = snap_ref(sn);
    IRIns *ir = IR(ref);
    if ((sn & SNAP_NORESTORE))
      continue;
    if ((sn & SNAP_KEYINDEX)) {
      emit_movmroi(as, RID_BASE, ofs+4, LJ_KEYINDEX);
      if (irref_isk(ref)) {
        emit_movmroi(as, RID_BASE, ofs, ir->i);
      } else {
        Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
        emit_movtomro(as, src, RID_BASE, ofs);
      }
    } else if (irt_isnum(ir->t)) {
      Reg src = ra_alloc1(as, ref, RSET_FPR);
      emit_rmro(as, XO_MOVSDto, src, RID_BASE, ofs);
    } else {
      lj_assertA(irt_ispri(ir->t) || irt_isaddr(ir->t) ||
                 (LJ_DUALNUM && irt_isinteger(ir->t)),
                 "restore of IR type %d", irt_type(ir->t));
      if (!irref_isk(ref)) {
        Reg src = ra_alloc1(as, ref, rset_exclude(RSET_GPR, RID_BASE));
#if LJ_GC64
        if (irt_is64(ir->t)) {
          /* TODO: 64 bit store + 32 bit load-modify-store is suboptimal. */
          emit_u32(as, irt_toitype(ir->t) << 15);
          emit_rmro(as, XO_ARITHi, XOg_OR, RID_BASE, ofs+4);
        } else if (LJ_DUALNUM && irt_isinteger(ir->t)) {
          emit_movmroi(as, RID_BASE, ofs+4, LJ_TISNUM << 15);
        } else {
          emit_movmroi(as, RID_BASE, ofs+4, (irt_toitype(ir->t)<<15)|0x7fff);
        }
#endif
        emit_movtomro(as, REX_64IR(ir, src), RID_BASE, ofs);
#if LJ_GC64
      } else {
        TValue k;
        lj_ir_kvalue(as->J->L, &k, ir);
        if (tvisnil(&k)) {
          emit_i32(as, -1);
          emit_rmro(as, XO_MOVmi, REX_64, RID_BASE, ofs);
        } else {
          emit_movmroi(as, RID_BASE, ofs+4, k.u32.hi);
          emit_movmroi(as, RID_BASE, ofs, k.u32.lo);
        }
#else
      } else if (!irt_ispri(ir->t)) {
        emit_movmroi(as, RID_BASE, ofs, ir->i);
#endif
      }
      if ((sn & (SNAP_CONT|SNAP_FRAME))) {
#if !LJ_FR2
        if (s != 0)  /* Do not overwrite link to previous frame. */
          emit_movmroi(as, RID_BASE, ofs+4, (int32_t)(*flinks--));
#endif
#if !LJ_GC64
      } else {
        if (!(LJ_64 && irt_islightud(ir->t)))
          emit_movmroi(as, RID_BASE, ofs+4, irt_toitype(ir->t));
#endif
      }
    }
    checkmclim(as);
  }
  lj_assertA(map + nent == flinks, "inconsistent frames in snapshot");
}

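/*
** Informal reading of the LJ_GC64 stores above: a tagged value is rebuilt
** in two 32 bit halves -- the payload goes to [BASE+ofs] and the high word
** becomes (itype << 15) | 0x7fff; 64 bit payloads instead get a full
** 64 bit store whose high word is then OR-ed with itype << 15. E.g. nil
** has itype -1, so (-1 << 15) | 0x7fff == 0xffffffff, which is why the
** constant path can store nil as one sign-extended 64 bit -1.
*/
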
/* -- GC handling --------------------------------------------------------- */

/* Check GC threshold and do one or more GC steps. */
static void asm_gc_check(ASMState *as)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_gc_step_jit];
  IRRef args[2];
  MCLabel l_end;
  Reg tmp;
  ra_evictset(as, RSET_SCRATCH);
  l_end = emit_label(as);
  /* Exit trace if in GCSatomic or GCSfinalize. Avoids syncing GC objects. */
  asm_guardcc(as, CC_NE);  /* Assumes asm_snap_prep() already done. */
  emit_rr(as, XO_TEST, RID_RET, RID_RET);
  args[0] = ASMREF_TMP1;  /* global_State *g */
  args[1] = ASMREF_TMP2;  /* MSize steps     */
  asm_gencall(as, ci, args);
  tmp = ra_releasetmp(as, ASMREF_TMP1);
#if LJ_GC64
  emit_rmro(as, XO_LEA, tmp|REX_64, RID_DISPATCH, GG_DISP2G);
#else
  emit_loada(as, tmp, J2G(as->J));
#endif
  emit_loadi(as, ra_releasetmp(as, ASMREF_TMP2), as->gcsteps);
  /* Jump around GC step if GC total < GC threshold. */
  emit_sjcc(as, CC_B, l_end);
  emit_opgl(as, XO_ARITH(XOg_CMP), tmp|REX_GC64, gc.threshold);
  emit_getgl(as, tmp, gc.total);
  as->gcsteps = 0;
  checkmclim(as);
}

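/*
** Emission runs backwards, so the code above executes in this forward
** order (sketch; LJ_GC64 path, register names illustrative):
**
**   mov  tmp, [g->gc.total]
**   cmp  tmp, [g->gc.threshold]
**   jb   l_end                       ; skip the step while below threshold
**   mov  TMP2, gcsteps               ; MSize steps
**   lea  TMP1, [DISPATCH+GG_DISP2G]  ; global_State *g
**   call lj_gc_step_jit
**   test eax, eax
**   jne  ->exit                      ; in GCSatomic/GCSfinalize: leave trace
** l_end:
*/
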
/* -- Loop handling ------------------------------------------------------- */

/* Fixup the loop branch. */
static void asm_loop_fixup(ASMState *as)
{
  MCode *p = as->mctop;
  MCode *target = as->mcp;
  if (as->realign) {  /* Realigned loops use short jumps. */
    as->realign = NULL;  /* Stop another retry. */
    lj_assertA(((intptr_t)target & 15) == 0, "loop realign failed");
    if (as->loopinv) {  /* Inverted loop branch? */
      p -= 5;
      p[0] = XI_JMP;
      lj_assertA(target - p >= -128, "loop realign failed");
      p[-1] = (MCode)(target - p);  /* Patch sjcc. */
      if (as->loopinv == 2)
        p[-3] = (MCode)(target - p + 2);  /* Patch opt. short jp. */
    } else {
      lj_assertA(target - p >= -128, "loop realign failed");
      p[-1] = (MCode)(int8_t)(target - p);  /* Patch short jmp. */
      p[-2] = XI_JMPs;
    }
  } else {
    MCode *newloop;
    p[-5] = XI_JMP;
    if (as->loopinv) {  /* Inverted loop branch? */
      /* asm_guardcc already inverted the jcc and patched the jmp. */
      p -= 5;
      newloop = target+4;
      *(int32_t *)(p-4) = (int32_t)(target - p);  /* Patch jcc. */
      if (as->loopinv == 2) {
        *(int32_t *)(p-10) = (int32_t)(target - p + 6);  /* Patch opt. jp. */
        newloop = target+8;
      }
    } else {  /* Otherwise just patch jmp. */
      *(int32_t *)(p-4) = (int32_t)(target - p);
      newloop = target+3;
    }
    /* Realign small loops and shorten the loop branch. */
    if (newloop >= p - 128) {
      as->realign = newloop;  /* Force a retry and remember alignment. */
      as->curins = as->stopins;  /* Abort asm_trace now. */
      as->T->nins = as->orignins;  /* Remove any added renames. */
    }
  }
}

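/*
** The backward loop branch is a near jump on the first pass; if the loop
** body turns out to fit in short-jump range, assembly is retried with the
** loop header 16 byte aligned and a short jump instead (sketch of the two
** encodings):
**
**   e9 xx xx xx xx   jmp rel32   ; first pass
**   eb xx            jmp rel8    ; realigned retry
**
** With as->loopinv set, asm_guardcc already turned the final guard into
** the loop branch, so a jcc (plus an optional jp) is patched instead.
*/
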
/* Fixup the tail of the loop. */
static void asm_loop_tail_fixup(ASMState *as)
{
  UNUSED(as);  /* Nothing to do. */
}

/* -- Head of trace ------------------------------------------------------- */

/* Coalesce BASE register for a root trace. */
static void asm_head_root_base(ASMState *as)
{
  IRIns *ir = IR(REF_BASE);
  Reg r = ir->r;
  if (ra_hasreg(r)) {
    ra_free(as, r);
    if (rset_test(as->modset, r) || irt_ismarked(ir->t))
      ir->r = RID_INIT;  /* No inheritance for modified BASE register. */
    if (r != RID_BASE)
      emit_rr(as, XO_MOV, r|REX_GC64, RID_BASE);
  }
}

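/*
** Informal note: on entry to a root trace the interpreter leaves BASE in
** RID_BASE, so if the allocator assigned a different register, a single
** 'mov r, BASE' bridges the two. A BASE that gets modified on-trace is
** deliberately not inherited (ir->r reset to RID_INIT).
*/
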
/* Coalesce or reload BASE register for a side trace. */
static Reg asm_head_side_base(ASMState *as, IRIns *irp)
{
  IRIns *ir = IR(REF_BASE);
  Reg r = ir->r;
  if (ra_hasreg(r)) {
    ra_free(as, r);
    if (rset_test(as->modset, r) || irt_ismarked(ir->t))
      ir->r = RID_INIT;  /* No inheritance for modified BASE register. */
    if (irp->r == r) {
      return r;  /* Same BASE register already coalesced. */
    } else if (ra_hasreg(irp->r) && rset_test(as->freeset, irp->r)) {
      /* Move from coalesced parent reg. */
      emit_rr(as, XO_MOV, r|REX_GC64, irp->r);
      return irp->r;
    } else {
      emit_getgl(as, r, jit_base);  /* Otherwise reload BASE. */
    }
  }
  return RID_NONE;
}

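/*
** Three cases for the side-trace head (informal sketch):
**   1. parent kept BASE in the same register  -> nothing to emit
**   2. parent register known and still free   -> mov r, parent_r
**   3. otherwise                               -> mov r, [g->jit_base]
** The return value (coalesced parent register or RID_NONE) tells the
** caller what it may keep relying on.
*/
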
/* -- Tail of trace ------------------------------------------------------- */

/* Fixup the tail code. */
static void asm_tail_fixup(ASMState *as, TraceNo lnk)
{
  /* Note: don't use as->mcp swap + emit_*: emit_op overwrites more bytes. */
  MCode *p = as->mctop;
  MCode *target, *q;
  int32_t spadj = as->T->spadjust;
  if (spadj == 0) {
    p -= LJ_64 ? 7 : 6;
  } else {
    MCode *p1;
    /* Patch stack adjustment. */
    if (checki8(spadj)) {
      p -= 3;
      p1 = p-6;
      *p1 = (MCode)spadj;
    } else {
      p1 = p-9;
      *(int32_t *)p1 = spadj;
    }
#if LJ_64
    p1[-3] = 0x48;
#endif
    p1[-2] = (MCode)(checki8(spadj) ? XI_ARITHi8 : XI_ARITHi);
    p1[-1] = MODRM(XM_REG, XOg_ADD, RID_ESP);
  }
  /* Patch exit branch. */
  target = lnk ? traceref(as->J, lnk)->mcode : (MCode *)lj_vm_exit_interp;
  *(int32_t *)(p-4) = jmprel(as->J, p, target);
  p[-5] = XI_JMP;
  /* Drop unused mcode tail. Fill with NOPs to make the prefetcher happy. */
  for (q = as->mctop-1; q >= p; q--)
    *q = XI_NOP;
  as->mctop = p;
}

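/*
** Layout of the patched tail (sketch, x64 with an imm8 stack adjustment;
** xx bytes are placeholders, the REX byte 48 is absent on x86):
**
**   48 83 c4 xx      add rsp, spadj   ; omitted when spadj == 0
**   e9 xx xx xx xx   jmp target       ; linked trace or lj_vm_exit_interp
**   90 ...           nop              ; unused tail, fill for the prefetcher
*/
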
/* Prepare tail of code. */
static void asm_tail_prep(ASMState *as)
{
  MCode *p = as->mctop;
  /* Realign and leave room for backwards loop branch or exit branch. */
  if (as->realign) {
    int i = ((int)(intptr_t)as->realign) & 15;
    /* Fill unused mcode tail with NOPs to make the prefetcher happy. */
    while (i-- > 0)
      *--p = XI_NOP;
    as->mctop = p;
    p -= (as->loopinv ? 5 : 2);  /* Space for short/near jmp. */
  } else {
    p -= 5;  /* Space for exit branch (near jmp). */
  }
  if (as->loopref) {
    as->invmcp = as->mcp = p;
  } else {
    /* Leave room for ESP adjustment: add esp, imm or lea esp, [esp+imm] */
    as->mcp = p - (LJ_64 ? 7 : 6);
    as->invmcp = NULL;
  }
}

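/*
** The reserved sizes must match what asm_tail_fixup patches above:
** 5 bytes for the near jmp, plus 7 (x64) or 6 (x86) bytes for the
** worst-case 'add esp/rsp, imm32' on non-looping traces.
*/
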
/* -- Trace setup --------------------------------------------------------- */

/* Ensure there are enough stack slots for call arguments. */
static Reg asm_setup_call_slots(ASMState *as, IRIns *ir, const CCallInfo *ci)
{
  IRRef args[CCI_NARGS_MAX*2];
  int nslots;
  asm_collectargs(as, ir, ci, args);
  nslots = asm_count_call_slots(as, ci, args);
  if (nslots > as->evenspill)  /* Leave room for args in stack slots. */
    as->evenspill = nslots;
#if LJ_64
  return irt_isfp(ir->t) ? REGSP_HINT(RID_FPRET) : REGSP_HINT(RID_RET);
#else
  return irt_isfp(ir->t) ? REGSP_INIT : REGSP_HINT(RID_RET);
#endif
}

/* Target-specific setup. */
static void asm_setup_target(ASMState *as)
{
  asm_exitstub_setup(as, as->T->nsnap);
  as->mrm.base = 0;
}

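/*
** Informal note: bumping as->evenspill reserves outgoing stack slots for
** call arguments that don't fit in registers, and the REGSP_HINT return
** value steers the call result towards RID_RET/RID_FPRET so the move can
** usually be coalesced away. (x86 FP results come back on the x87 stack,
** hence REGSP_INIT there.)
*/
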
/* -- Trace patching ------------------------------------------------------ */

static const uint8_t map_op1[256] = {
0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x20,
0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x51,0x51,
0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,
0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,0x92,0x92,0x92,0x92,0x52,0x45,0x10,0x51,
#if LJ_64
0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x14,0x14,0x14,0x14,0x14,0x14,0x14,0x14,
#else
0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,
#endif
0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,
0x51,0x51,0x92,0x92,0x10,0x10,0x12,0x11,0x45,0x86,0x52,0x93,0x51,0x51,0x51,0x51,
0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,
0x93,0x86,0x93,0x93,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,
0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x51,0x47,0x51,0x51,0x51,0x51,0x51,
#if LJ_64
0x59,0x59,0x59,0x59,0x51,0x51,0x51,0x51,0x52,0x45,0x51,0x51,0x51,0x51,0x51,0x51,
#else
0x55,0x55,0x55,0x55,0x51,0x51,0x51,0x51,0x52,0x45,0x51,0x51,0x51,0x51,0x51,0x51,
#endif
0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x05,0x05,0x05,0x05,0x05,0x05,0x05,0x05,
0x93,0x93,0x53,0x51,0x70,0x71,0x93,0x86,0x54,0x51,0x53,0x51,0x51,0x52,0x51,0x51,
0x92,0x92,0x92,0x92,0x52,0x52,0x51,0x51,0x92,0x92,0x92,0x92,0x92,0x92,0x92,0x92,
0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x45,0x45,0x47,0x52,0x51,0x51,0x51,0x51,
0x10,0x51,0x10,0x10,0x51,0x51,0x63,0x66,0x51,0x51,0x51,0x51,0x51,0x51,0x92,0x92
};

static const uint8_t map_op2[256] = {
0x93,0x93,0x93,0x93,0x52,0x52,0x52,0x52,0x52,0x52,0x51,0x52,0x51,0x93,0x52,0x94,
0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
0x53,0x53,0x53,0x53,0x53,0x53,0x53,0x53,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x34,0x51,0x35,0x51,0x51,0x51,0x51,0x51,
0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
0x53,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
0x94,0x54,0x54,0x54,0x93,0x93,0x93,0x52,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,0x46,
0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
0x52,0x52,0x52,0x93,0x94,0x93,0x51,0x51,0x52,0x52,0x52,0x93,0x94,0x93,0x93,0x93,
0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x94,0x93,0x93,0x93,0x93,0x93,
0x93,0x93,0x94,0x93,0x94,0x94,0x94,0x93,0x52,0x52,0x52,0x52,0x52,0x52,0x52,0x52,
0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,
0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x93,0x52
};

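/*
** A plausible reading of the two opcode maps above (used by
** asm_x86_inslen below): the low nibble of each entry is a byte count and
** the high nibble selects a decoder class -- 0: terminal (the b8-bf
** mov-imm entries also grow by 4 under REX.W), 1: prefix byte, 2: 0f
** escape, 3: extra opcode byte then ModRM, 4: terminal, shrinks by 2
** under a 66 prefix, 5: terminal, 6: group 3 (f6/f7, immediate only for
** TEST), 7: VEX c4/c5 (LES/LDS on x86), 8: ModRM, shrinks by 2 under a
** 66 prefix, 9: ModRM.
*/
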
static uint32_t asm_x86_inslen(const uint8_t* p)
{
  uint32_t result = 0;
  uint32_t prefixes = 0;
  uint32_t x = map_op1[*p];
  for (;;) {
    switch (x >> 4) {
    case 0: return result + x + (prefixes & 4);
    case 1: prefixes |= x; x = map_op1[*++p]; result++; break;
    case 2: x = map_op2[*++p]; break;
    case 3: p++; goto mrm;
    case 4: result -= (prefixes & 2);  /* fallthrough */
    case 5: return result + (x & 15);
    case 6:  /* Group 3. */
      if (p[1] & 0x38) x = 2;
      else if ((prefixes & 2) && (x == 0x66)) x = 4;
      goto mrm;
    case 7:  /* VEX c4/c5. */
      if (LJ_32 && p[1] < 0xc0) {
        x = 2;
        goto mrm;
      }
      if (x == 0x70) {
        x = *++p & 0x1f;
        result++;
        if (x >= 2) {
          p += 2;
          result += 2;
          goto mrm;
        }
      }
      p++;
      result++;
      x = map_op2[*++p];
      break;
    case 8: result -= (prefixes & 2);  /* fallthrough */
    case 9: mrm:  /* ModR/M and possibly SIB. */
      result += (x & 15);
      x = *++p;
      switch (x >> 6) {
      case 0: if ((x & 7) == 5) return result + 4; break;
      case 1: result++; break;
      case 2: result += 4; break;
      case 3: return result;
      }
      if ((x & 7) == 4) {
        result++;
        if (x < 0x40 && (p[1] & 7) == 5) result += 4;
      }
      return result;
    }
  }
}

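/*
** Worked example for asm_x86_inslen above (x64): decoding
** 48 89 44 24 08 (mov [rsp+8], rax). map_op1[0x48] = 0x14 -> REX prefix,
** result = 1; map_op1[0x89] = 0x92 -> ModR/M class, result += 2 -> 3;
** ModR/M byte 0x44: mod == 01 adds a disp8 (result = 4) and rm == 100
** adds a SIB byte (result = 5). Total: 5 bytes, matching the encoding.
*/
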
/* Patch exit jumps of existing machine code to a new target. */
void lj_asm_patchexit(jit_State *J, GCtrace *T, ExitNo exitno, MCode *target)
{
  MCode *p = T->mcode;
  MCode *mcarea = lj_mcode_patch(J, p, 0);
  MSize len = T->szmcode;
  MCode *px = exitstub_addr(J, exitno) - 6;
  MCode *pe = p+len-6;
  MCode *pgc = NULL;
#if LJ_GC64
  uint32_t statei = (uint32_t)(GG_OFS(g.vmstate) - GG_OFS(dispatch));
#else
  uint32_t statei = u32ptr(&J2G(J)->vmstate);
#endif
  if (len > 5 && p[len-5] == XI_JMP && p+len-6 + *(int32_t *)(p+len-4) == px)
    *(int32_t *)(p+len-4) = jmprel(J, p+len, target);
  /* Do not patch parent exit for a stack check. Skip beyond vmstate update. */
  for (; p < pe; p += asm_x86_inslen(p)) {
    intptr_t ofs = LJ_GC64 ? (p[0] & 0xf0) == 0x40 : LJ_64;
    if (*(uint32_t *)(p+2+ofs) == statei && p[ofs+LJ_GC64-LJ_64] == XI_MOVmi)
      break;
  }
  lj_assertJ(p < pe, "instruction length decoder failed");
  for (; p < pe; p += asm_x86_inslen(p)) {
    if ((*(uint16_t *)p & 0xf0ff) == 0x800f && p + *(int32_t *)(p+2) == px &&
        p != pgc) {
      *(int32_t *)(p+2) = jmprel(J, p+6, target);
    } else if (*p == XI_CALL &&
               (void *)(p+5+*(int32_t *)(p+1)) == (void *)lj_gc_step_jit) {
      pgc = p+7;  /* Do not patch GC check exit. */
    }
  }
  lj_mcode_sync(T->mcode, T->mcode + T->szmcode);
  lj_mcode_patch(J, mcarea, 1);
}

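/*
** Patch-scan notes for lj_asm_patchexit (informal): the check
** (v & 0xf0ff) == 0x800f matches the little-endian first two bytes of any
** near jcc (0f 80..0f 8f). The first loop deliberately skips ahead to the
** vmstate store, so the parent-exit branch of a stack check at the trace
** head is never redirected; pgc marks the jne that follows the
** 'call lj_gc_step_jit; test eax, eax' pair (5 + 2 bytes), which must
** keep exiting through its own stub.
*/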