comparison third_party/luajit/src/lj_asm.c @ 178:94705b5986b3

[ThirdParty] Added WRK and luajit for load testing.
author MrJuneJune <me@mrjunejune.com>
date Thu, 22 Jan 2026 20:10:30 -0800
/*
** IR assembler (SSA IR -> machine code).
** Copyright (C) 2005-2023 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_asm_c
#define LUA_CORE

#include "lj_obj.h"

#if LJ_HASJIT

#include "lj_gc.h"
#include "lj_buf.h"
#include "lj_str.h"
#include "lj_tab.h"
#include "lj_frame.h"
#if LJ_HASFFI
#include "lj_ctype.h"
#endif
#include "lj_ir.h"
#include "lj_jit.h"
#include "lj_ircall.h"
#include "lj_iropt.h"
#include "lj_mcode.h"
#include "lj_trace.h"
#include "lj_snap.h"
#include "lj_asm.h"
#include "lj_dispatch.h"
#include "lj_vm.h"
#include "lj_target.h"

#ifdef LUA_USE_ASSERT
#include <stdio.h>
#endif

/* -- Assembler state and common macros ----------------------------------- */

/* Assembler state. */
typedef struct ASMState {
  RegCost cost[RID_MAX];  /* Reference and blended allocation cost for regs. */

  MCode *mcp;           /* Current MCode pointer (grows down). */
  MCode *mclim;         /* Lower limit for MCode memory + red zone. */
#ifdef LUA_USE_ASSERT
  MCode *mcp_prev;      /* Red zone overflow check. */
#endif

  IRIns *ir;            /* Copy of pointer to IR instructions/constants. */
  jit_State *J;         /* JIT compiler state. */

#if LJ_TARGET_X86ORX64
  x86ModRM mrm;         /* Fused x86 address operand. */
#endif

  RegSet freeset;       /* Set of free registers. */
  RegSet modset;        /* Set of registers modified inside the loop. */
  RegSet weakset;       /* Set of weakly referenced registers. */
  RegSet phiset;        /* Set of PHI registers. */

  uint32_t flags;       /* Copy of JIT compiler flags. */
  int loopinv;          /* Loop branch inversion (0:no, 1:yes, 2:yes+CC_P). */

  int32_t evenspill;    /* Next even spill slot. */
  int32_t oddspill;     /* Next odd spill slot (or 0). */

  IRRef curins;         /* Reference of current instruction. */
  IRRef stopins;        /* Stop assembly before hitting this instruction. */
  IRRef orignins;       /* Original T->nins. */

  IRRef snapref;        /* Current snapshot is active after this reference. */
  IRRef snaprename;     /* Rename highwater mark for snapshot check. */
  SnapNo snapno;        /* Current snapshot number. */
  SnapNo loopsnapno;    /* Loop snapshot number. */
  int snapalloc;        /* Current snapshot needs allocation. */
  BloomFilter snapfilt1, snapfilt2;  /* Filled with snapshot refs. */

  IRRef fuseref;        /* Fusion limit (loopref, 0 or FUSE_DISABLED). */
  IRRef sectref;        /* Section base reference (loopref or 0). */
  IRRef loopref;        /* Reference of LOOP instruction (or 0). */

  BCReg topslot;        /* Number of slots for stack check (unless 0). */
  int32_t gcsteps;      /* Accumulated number of GC steps (per section). */

  GCtrace *T;           /* Trace to assemble. */
  GCtrace *parent;      /* Parent trace (or NULL). */

  MCode *mcbot;         /* Bottom of reserved MCode. */
  MCode *mctop;         /* Top of generated MCode. */
  MCode *mctoporig;     /* Original top of generated MCode. */
  MCode *mcloop;        /* Pointer to loop MCode (or NULL). */
  MCode *invmcp;        /* Points to invertible loop branch (or NULL). */
  MCode *flagmcp;       /* Pending opportunity to merge flag setting ins. */
  MCode *realign;       /* Realign loop if not NULL. */

#ifdef RID_NUM_KREF
  intptr_t krefk[RID_NUM_KREF];
#endif
  IRRef1 phireg[RID_MAX];  /* PHI register references. */
  uint16_t parentmap[LJ_MAX_JSLOTS];  /* Parent instruction to RegSP map. */
} ASMState;

#ifdef LUA_USE_ASSERT
#define lj_assertA(c, ...)  lj_assertG_(J2G(as->J), (c), __VA_ARGS__)
#else
#define lj_assertA(c, ...)  ((void)as)
#endif

#define IR(ref)  (&as->ir[(ref)])

#define ASMREF_TMP1  REF_TRUE   /* Temp. register. */
#define ASMREF_TMP2  REF_FALSE  /* Temp. register. */
#define ASMREF_L     REF_NIL    /* Stores register for L. */

/* Check for variant to invariant references. */
#define iscrossref(as, ref)  ((ref) < as->sectref)

/* Inhibit memory op fusion from variant to invariant references. */
#define FUSE_DISABLED       (~(IRRef)0)
#define mayfuse(as, ref)    ((ref) > as->fuseref)
#define neverfuse(as)       (as->fuseref == FUSE_DISABLED)
#define canfuse(as, ir)     (!neverfuse(as) && !irt_isphi((ir)->t))
#define opisfusableload(o) \
  ((o) == IR_ALOAD || (o) == IR_HLOAD || (o) == IR_ULOAD || \
   (o) == IR_FLOAD || (o) == IR_XLOAD || (o) == IR_SLOAD || (o) == IR_VLOAD)

/* Sparse limit checks using a red zone before the actual limit. */
#define MCLIM_REDZONE  64

static LJ_NORET LJ_NOINLINE void asm_mclimit(ASMState *as)
{
  lj_mcode_limiterr(as->J, (size_t)(as->mctop - as->mcp + 4*MCLIM_REDZONE));
}

static LJ_AINLINE void checkmclim(ASMState *as)
{
#ifdef LUA_USE_ASSERT
  if (as->mcp + MCLIM_REDZONE < as->mcp_prev) {
    IRIns *ir = IR(as->curins+1);
    lj_assertA(0, "red zone overflow: %p IR %04d %02d %04d %04d\n", as->mcp,
	       as->curins+1-REF_BIAS, ir->o, ir->op1-REF_BIAS, ir->op2-REF_BIAS);
  }
#endif
  if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as);
#ifdef LUA_USE_ASSERT
  as->mcp_prev = as->mcp;
#endif
}

#ifdef RID_NUM_KREF
#define ra_iskref(ref)     ((ref) < RID_NUM_KREF)
#define ra_krefreg(ref)    ((Reg)(RID_MIN_KREF + (Reg)(ref)))
#define ra_krefk(as, ref)  (as->krefk[(ref)])

static LJ_AINLINE void ra_setkref(ASMState *as, Reg r, intptr_t k)
{
  IRRef ref = (IRRef)(r - RID_MIN_KREF);
  as->krefk[ref] = k;
  as->cost[r] = REGCOST(ref, ref);
}

#else
#define ra_iskref(ref)     0
#define ra_krefreg(ref)    RID_MIN_GPR
#define ra_krefk(as, ref)  0
#endif

/* Arch-specific field offsets. */
static const uint8_t field_ofs[IRFL__MAX+1] = {
#define FLOFS(name, ofs)  (uint8_t)(ofs),
IRFLDEF(FLOFS)
#undef FLOFS
  0
};

/* -- Target-specific instruction emitter --------------------------------- */

#if LJ_TARGET_X86ORX64
#include "lj_emit_x86.h"
#elif LJ_TARGET_ARM
#include "lj_emit_arm.h"
#elif LJ_TARGET_ARM64
#include "lj_emit_arm64.h"
#elif LJ_TARGET_PPC
#include "lj_emit_ppc.h"
#elif LJ_TARGET_MIPS
#include "lj_emit_mips.h"
#else
#error "Missing instruction emitter for target CPU"
#endif

/* Generic load/store of register from/to stack slot. */
#define emit_spload(as, ir, r, ofs) \
  emit_loadofs(as, ir, (r), RID_SP, (ofs))
#define emit_spstore(as, ir, r, ofs) \
  emit_storeofs(as, ir, (r), RID_SP, (ofs))

/* -- Register allocator debugging ---------------------------------------- */

/* #define LUAJIT_DEBUG_RA */

#ifdef LUAJIT_DEBUG_RA

#include <stdio.h>
#include <stdarg.h>

#define RIDNAME(name)  #name,
static const char *const ra_regname[] = {
  GPRDEF(RIDNAME)
  FPRDEF(RIDNAME)
  VRIDDEF(RIDNAME)
  NULL
};
#undef RIDNAME

static char ra_dbg_buf[65536];
static char *ra_dbg_p;
static char *ra_dbg_merge;
static MCode *ra_dbg_mcp;

static void ra_dstart(void)
{
  ra_dbg_p = ra_dbg_buf;
  ra_dbg_merge = NULL;
  ra_dbg_mcp = NULL;
}

static void ra_dflush(void)
{
  fwrite(ra_dbg_buf, 1, (size_t)(ra_dbg_p-ra_dbg_buf), stdout);
  ra_dstart();
}

static void ra_dprintf(ASMState *as, const char *fmt, ...)
{
  char *p;
  va_list argp;
  va_start(argp, fmt);
  p = ra_dbg_mcp == as->mcp ? ra_dbg_merge : ra_dbg_p;
  ra_dbg_mcp = NULL;
  p += sprintf(p, "%08x \e[36m%04d ", (uintptr_t)as->mcp, as->curins-REF_BIAS);
  for (;;) {
    const char *e = strchr(fmt, '$');
    if (e == NULL) break;
    memcpy(p, fmt, (size_t)(e-fmt));
    p += e-fmt;
    if (e[1] == 'r') {
      Reg r = va_arg(argp, Reg) & RID_MASK;
      if (r <= RID_MAX) {
	const char *q;
	for (q = ra_regname[r]; *q; q++)
	  *p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q;
      } else {
	*p++ = '?';
	lj_assertA(0, "bad register %d for debug format \"%s\"", r, fmt);
      }
    } else if (e[1] == 'f' || e[1] == 'i') {
      IRRef ref;
      if (e[1] == 'f')
	ref = va_arg(argp, IRRef);
      else
	ref = va_arg(argp, IRIns *) - as->ir;
      if (ref >= REF_BIAS)
	p += sprintf(p, "%04d", ref - REF_BIAS);
      else
	p += sprintf(p, "K%03d", REF_BIAS - ref);
    } else if (e[1] == 's') {
      uint32_t slot = va_arg(argp, uint32_t);
      p += sprintf(p, "[sp+0x%x]", sps_scale(slot));
    } else if (e[1] == 'x') {
      p += sprintf(p, "%08x", va_arg(argp, int32_t));
    } else {
      lj_assertA(0, "bad debug format code");
    }
    fmt = e+2;
  }
  va_end(argp);
  while (*fmt)
    *p++ = *fmt++;
  *p++ = '\e'; *p++ = '['; *p++ = 'm'; *p++ = '\n';
  if (p > ra_dbg_buf+sizeof(ra_dbg_buf)-256) {
    fwrite(ra_dbg_buf, 1, (size_t)(p-ra_dbg_buf), stdout);
    p = ra_dbg_buf;
  }
  ra_dbg_p = p;
}

#define RA_DBG_START()  ra_dstart()
#define RA_DBG_FLUSH()  ra_dflush()
#define RA_DBG_REF() \
  do { char *_p = ra_dbg_p; ra_dprintf(as, ""); \
       ra_dbg_merge = _p; ra_dbg_mcp = as->mcp; } while (0)
#define RA_DBGX(x)      ra_dprintf x

#else
#define RA_DBG_START()  ((void)0)
#define RA_DBG_FLUSH()  ((void)0)
#define RA_DBG_REF()    ((void)0)
#define RA_DBGX(x)      ((void)0)
#endif

/* -- Register allocator -------------------------------------------------- */

#define ra_free(as, r)      rset_set(as->freeset, (r))
#define ra_modified(as, r)  rset_set(as->modset, (r))
#define ra_weak(as, r)      rset_set(as->weakset, (r))
#define ra_noweak(as, r)    rset_clear(as->weakset, (r))

#define ra_used(ir)  (ra_hasreg((ir)->r) || ra_hasspill((ir)->s))

/* Setup register allocator. */
static void ra_setup(ASMState *as)
{
  Reg r;
  /* Initially all regs (except the stack pointer) are free for use. */
  as->freeset = RSET_INIT;
  as->modset = RSET_EMPTY;
  as->weakset = RSET_EMPTY;
  as->phiset = RSET_EMPTY;
  memset(as->phireg, 0, sizeof(as->phireg));
  for (r = RID_MIN_GPR; r < RID_MAX; r++)
    as->cost[r] = REGCOST(~0u, 0u);
}

/* Rematerialize constants. */
static Reg ra_rematk(ASMState *as, IRRef ref)
{
  IRIns *ir;
  Reg r;
  if (ra_iskref(ref)) {
    r = ra_krefreg(ref);
    lj_assertA(!rset_test(as->freeset, r), "rematk of free reg %d", r);
    ra_free(as, r);
    ra_modified(as, r);
#if LJ_64
    emit_loadu64(as, r, ra_krefk(as, ref));
#else
    emit_loadi(as, r, ra_krefk(as, ref));
#endif
    return r;
  }
  ir = IR(ref);
  r = ir->r;
  lj_assertA(ra_hasreg(r), "rematk of K%03d has no reg", REF_BIAS - ref);
  lj_assertA(!ra_hasspill(ir->s),
	     "rematk of K%03d has spill slot [%x]", REF_BIAS - ref, ir->s);
  ra_free(as, r);
  ra_modified(as, r);
  ir->r = RID_INIT;  /* Do not keep any hint. */
  RA_DBGX((as, "remat $i $r", ir, r));
#if !LJ_SOFTFP32
  if (ir->o == IR_KNUM) {
    emit_loadk64(as, r, ir);
  } else
#endif
  if (emit_canremat(REF_BASE) && ir->o == IR_BASE) {
    ra_sethint(ir->r, RID_BASE);  /* Restore BASE register hint. */
    emit_getgl(as, r, jit_base);
  } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) {
    /* REF_NIL stores ASMREF_L register. */
    lj_assertA(irt_isnil(ir->t), "rematk of bad ASMREF_L");
    emit_getgl(as, r, cur_L);
#if LJ_64
  } else if (ir->o == IR_KINT64) {
    emit_loadu64(as, r, ir_kint64(ir)->u64);
#if LJ_GC64
  } else if (ir->o == IR_KGC) {
    emit_loadu64(as, r, (uintptr_t)ir_kgc(ir));
  } else if (ir->o == IR_KPTR || ir->o == IR_KKPTR) {
    emit_loadu64(as, r, (uintptr_t)ir_kptr(ir));
#endif
#endif
  } else {
    lj_assertA(ir->o == IR_KINT || ir->o == IR_KGC ||
	       ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL,
	       "rematk of bad IR op %d", ir->o);
    emit_loadi(as, r, ir->i);
  }
  return r;
}

/* Force a spill. Allocate a new spill slot if needed. */
static int32_t ra_spill(ASMState *as, IRIns *ir)
{
  int32_t slot = ir->s;
  lj_assertA(ir >= as->ir + REF_TRUE,
	     "spill of K%03d", REF_BIAS - (int)(ir - as->ir));
  if (!ra_hasspill(slot)) {
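    /* 64 bit values take an (even-aligned) slot pair; 32 bit values first
    ** reuse a leftover odd slot from an earlier pair.
    */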
    if (irt_is64(ir->t)) {
      slot = as->evenspill;
      as->evenspill += 2;
    } else if (as->oddspill) {
      slot = as->oddspill;
      as->oddspill = 0;
    } else {
      slot = as->evenspill;
      as->oddspill = slot+1;
      as->evenspill += 2;
    }
    if (as->evenspill > 256)
      lj_trace_err(as->J, LJ_TRERR_SPILLOV);
    ir->s = (uint8_t)slot;
  }
  return sps_scale(slot);
}

/* Release the temporarily allocated register in ASMREF_TMP1/ASMREF_TMP2. */
static Reg ra_releasetmp(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  Reg r = ir->r;
  lj_assertA(ra_hasreg(r), "release of TMP%d has no reg", ref-ASMREF_TMP1+1);
  lj_assertA(!ra_hasspill(ir->s),
	     "release of TMP%d has spill slot [%x]", ref-ASMREF_TMP1+1, ir->s);
  ra_free(as, r);
  ra_modified(as, r);
  ir->r = RID_INIT;
  return r;
}

/* Restore a register (marked as free). Rematerialize or force a spill. */
static Reg ra_restore(ASMState *as, IRRef ref)
{
  if (emit_canremat(ref)) {
    return ra_rematk(as, ref);
  } else {
    IRIns *ir = IR(ref);
    int32_t ofs = ra_spill(as, ir);  /* Force a spill slot. */
    Reg r = ir->r;
    lj_assertA(ra_hasreg(r), "restore of IR %04d has no reg", ref - REF_BIAS);
    ra_sethint(ir->r, r);  /* Keep hint. */
    ra_free(as, r);
    if (!rset_test(as->weakset, r)) {  /* Only restore non-weak references. */
      ra_modified(as, r);
      RA_DBGX((as, "restore $i $r", ir, r));
      emit_spload(as, ir, r, ofs);
    }
    return r;
  }
}

/* Save a register to a spill slot. */
static void ra_save(ASMState *as, IRIns *ir, Reg r)
{
  RA_DBGX((as, "save $i $r", ir, r));
  emit_spstore(as, ir, r, sps_scale(ir->s));
}

#define MINCOST(name) \
  if (rset_test(RSET_ALL, RID_##name) && \
      LJ_LIKELY(allow&RID2RSET(RID_##name)) && as->cost[RID_##name] < cost) \
    cost = as->cost[RID_##name];

/* Evict the register with the lowest cost, forcing a restore. */
static Reg ra_evict(ASMState *as, RegSet allow)
{
  IRRef ref;
  RegCost cost = ~(RegCost)0;
  lj_assertA(allow != RSET_EMPTY, "evict from empty set");
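  /* A set value below the first FPR bit can only contain GPRs. */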
  if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) {
    GPRDEF(MINCOST)
  } else {
    FPRDEF(MINCOST)
  }
  ref = regcost_ref(cost);
  lj_assertA(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins),
	     "evict of out-of-range IR %04d", ref - REF_BIAS);
  /* Preferably pick any weak ref instead of a non-weak, non-const ref. */
  if (!irref_isk(ref) && (as->weakset & allow)) {
    IRIns *ir = IR(ref);
    if (!rset_test(as->weakset, ir->r))
      ref = regcost_ref(as->cost[rset_pickbot((as->weakset & allow))]);
  }
  return ra_restore(as, ref);
}

/* Pick any register (marked as free). Evict on-demand. */
static Reg ra_pick(ASMState *as, RegSet allow)
{
  RegSet pick = as->freeset & allow;
  if (!pick)
    return ra_evict(as, allow);
  else
    return rset_picktop(pick);
}

/* Get a scratch register (marked as free). */
static Reg ra_scratch(ASMState *as, RegSet allow)
{
  Reg r = ra_pick(as, allow);
  ra_modified(as, r);
  RA_DBGX((as, "scratch $r", r));
  return r;
}

/* Evict all registers from a set (if not free). */
static void ra_evictset(ASMState *as, RegSet drop)
{
  RegSet work;
  as->modset |= drop;
#if !LJ_SOFTFP
  work = (drop & ~as->freeset) & RSET_FPR;
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }
#endif
  work = (drop & ~as->freeset);
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }
}

/* Evict (rematerialize) all registers allocated to constants. */
static void ra_evictk(ASMState *as)
{
  RegSet work;
#if !LJ_SOFTFP
  work = ~as->freeset & RSET_FPR;
  while (work) {
    Reg r = rset_pickbot(work);
    IRRef ref = regcost_ref(as->cost[r]);
    if (emit_canremat(ref) && irref_isk(ref)) {
      ra_rematk(as, ref);
      checkmclim(as);
    }
    rset_clear(work, r);
  }
#endif
  work = ~as->freeset & RSET_GPR;
  while (work) {
    Reg r = rset_pickbot(work);
    IRRef ref = regcost_ref(as->cost[r]);
    if (emit_canremat(ref) && irref_isk(ref)) {
      ra_rematk(as, ref);
      checkmclim(as);
    }
    rset_clear(work, r);
  }
}

#ifdef RID_NUM_KREF
/* Allocate a register for a constant. */
static Reg ra_allock(ASMState *as, intptr_t k, RegSet allow)
{
  /* First try to find a register which already holds the same constant. */
  RegSet pick, work = ~as->freeset & RSET_GPR;
  Reg r;
  while (work) {
    IRRef ref;
    r = rset_pickbot(work);
    ref = regcost_ref(as->cost[r]);
#if LJ_64
    if (ref < ASMREF_L) {
      if (ra_iskref(ref)) {
	if (k == ra_krefk(as, ref))
	  return r;
      } else {
	IRIns *ir = IR(ref);
	if ((ir->o == IR_KINT64 && k == (int64_t)ir_kint64(ir)->u64) ||
#if LJ_GC64
	    (ir->o == IR_KINT && k == ir->i) ||
	    (ir->o == IR_KGC && k == (intptr_t)ir_kgc(ir)) ||
	    ((ir->o == IR_KPTR || ir->o == IR_KKPTR) &&
	     k == (intptr_t)ir_kptr(ir))
#else
	    (ir->o != IR_KINT64 && k == ir->i)
#endif
	    )
	  return r;
      }
    }
#else
    if (ref < ASMREF_L &&
	k == (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i))
      return r;
#endif
    rset_clear(work, r);
  }
  pick = as->freeset & allow;
  if (pick) {
    /* Constants should preferably get unmodified registers. */
    if ((pick & ~as->modset))
      pick &= ~as->modset;
    r = rset_pickbot(pick);  /* Reduce conflicts with inverse allocation. */
  } else {
    r = ra_evict(as, allow);
  }
  RA_DBGX((as, "allock $x $r", k, r));
  ra_setkref(as, r, k);
  rset_clear(as->freeset, r);
  ra_noweak(as, r);
  return r;
}

/* Allocate a specific register for a constant. */
static void ra_allockreg(ASMState *as, intptr_t k, Reg r)
{
  Reg kr = ra_allock(as, k, RID2RSET(r));
  if (kr != r) {
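    /* Dummy IR with an integer type, just to emit a plain GPR-to-GPR move. */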
    IRIns irdummy;
    irdummy.t.irt = IRT_INT;
    ra_scratch(as, RID2RSET(r));
    emit_movrr(as, &irdummy, r, kr);
  }
}
#else
#define ra_allockreg(as, k, r)  emit_loadi(as, (r), (k))
#endif

/* Allocate a register for ref from the allowed set of registers.
** Note: this function assumes the ref does NOT have a register yet!
** Picks an optimal register, sets the cost and marks the register as non-free.
*/
static Reg ra_allocref(ASMState *as, IRRef ref, RegSet allow)
{
  IRIns *ir = IR(ref);
  RegSet pick = as->freeset & allow;
  Reg r;
  lj_assertA(ra_noreg(ir->r),
	     "IR %04d already has reg %d", ref - REF_BIAS, ir->r);
  if (pick) {
    /* First check register hint from propagation or PHI. */
    if (ra_hashint(ir->r)) {
      r = ra_gethint(ir->r);
      if (rset_test(pick, r))  /* Use hint register if possible. */
	goto found;
      /* Rematerialization is cheaper than missing a hint. */
      if (rset_test(allow, r) && emit_canremat(regcost_ref(as->cost[r]))) {
	ra_rematk(as, regcost_ref(as->cost[r]));
	goto found;
      }
      RA_DBGX((as, "hintmiss $f $r", ref, r));
    }
    /* Invariants should preferably get unmodified registers. */
    if (ref < as->loopref && !irt_isphi(ir->t)) {
      if ((pick & ~as->modset))
	pick &= ~as->modset;
      r = rset_pickbot(pick);  /* Reduce conflicts with inverse allocation. */
    } else {
      /* We've got plenty of regs, so get callee-save regs if possible. */
      if (RID_NUM_GPR > 8 && (pick & ~RSET_SCRATCH))
	pick &= ~RSET_SCRATCH;
      r = rset_picktop(pick);
    }
  } else {
    r = ra_evict(as, allow);
  }
found:
  RA_DBGX((as, "alloc $f $r", ref, r));
  ir->r = (uint8_t)r;
  rset_clear(as->freeset, r);
  ra_noweak(as, r);
  as->cost[r] = REGCOST_REF_T(ref, irt_t(ir->t));
  return r;
}

/* Allocate a register on-demand. */
static Reg ra_alloc1(ASMState *as, IRRef ref, RegSet allow)
{
  Reg r = IR(ref)->r;
  /* Note: allow is ignored if the register is already allocated. */
  if (ra_noreg(r)) r = ra_allocref(as, ref, allow);
  ra_noweak(as, r);
  return r;
}

/* Add a register rename to the IR. */
static void ra_addrename(ASMState *as, Reg down, IRRef ref, SnapNo snapno)
{
  IRRef ren;
  lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), ref, snapno);
  ren = tref_ref(lj_ir_emit(as->J));
  as->J->cur.ir[ren].r = (uint8_t)down;
  as->J->cur.ir[ren].s = SPS_NONE;
}

/* Rename register allocation and emit move. */
static void ra_rename(ASMState *as, Reg down, Reg up)
{
  IRRef ref = regcost_ref(as->cost[up] = as->cost[down]);
  IRIns *ir = IR(ref);
  ir->r = (uint8_t)up;
  as->cost[down] = 0;
  lj_assertA((down < RID_MAX_GPR) == (up < RID_MAX_GPR),
	     "rename between GPR/FPR %d and %d", down, up);
  lj_assertA(!rset_test(as->freeset, down), "rename from free reg %d", down);
  lj_assertA(rset_test(as->freeset, up), "rename to non-free reg %d", up);
  ra_free(as, down);  /* 'down' is free ... */
  ra_modified(as, down);
  rset_clear(as->freeset, up);  /* ... and 'up' is now allocated. */
  ra_noweak(as, up);
  RA_DBGX((as, "rename $f $r $r", regcost_ref(as->cost[up]), down, up));
  emit_movrr(as, ir, down, up);  /* Backwards codegen needs inverse move. */
  if (!ra_hasspill(IR(ref)->s)) {  /* Add the rename to the IR. */
    /*
    ** The rename is effective at the subsequent (already emitted) exit
    ** branch. This is for the current snapshot (as->snapno). Except if we
    ** haven't yet allocated any refs for the snapshot (as->snapalloc == 1),
    ** then it belongs to the next snapshot.
    ** See also the discussion at asm_snap_checkrename().
    */
    ra_addrename(as, down, ref, as->snapno + as->snapalloc);
  }
}

/* Pick a destination register (marked as free).
** Caveat: allow is ignored if there's already a destination register.
** Use ra_destreg() to get a specific register.
*/
static Reg ra_dest(ASMState *as, IRIns *ir, RegSet allow)
{
  Reg dest = ir->r;
  if (ra_hasreg(dest)) {
    ra_free(as, dest);
    ra_modified(as, dest);
  } else {
    if (ra_hashint(dest) && rset_test((as->freeset&allow), ra_gethint(dest))) {
      dest = ra_gethint(dest);
      ra_modified(as, dest);
      RA_DBGX((as, "dest $r", dest));
    } else {
      dest = ra_scratch(as, allow);
    }
    ir->r = dest;
  }
  if (LJ_UNLIKELY(ra_hasspill(ir->s))) ra_save(as, ir, dest);
  return dest;
}

/* Force a specific destination register (marked as free). */
static void ra_destreg(ASMState *as, IRIns *ir, Reg r)
{
  Reg dest = ra_dest(as, ir, RID2RSET(r));
  if (dest != r) {
    lj_assertA(rset_test(as->freeset, r), "dest reg %d is not free", r);
    ra_modified(as, r);
    emit_movrr(as, ir, dest, r);
  }
}

#if LJ_TARGET_X86ORX64
/* Propagate dest register to left reference. Emit moves as needed.
** This is a required fixup step for all 2-operand machine instructions.
*/
static void ra_left(ASMState *as, Reg dest, IRRef lref)
{
  IRIns *ir = IR(lref);
  Reg left = ir->r;
  if (ra_noreg(left)) {
    if (irref_isk(lref)) {
      if (ir->o == IR_KNUM) {
	/* FP remat needs a load except for +0. Still better than eviction. */
	if (tvispzero(ir_knum(ir)) || !(as->freeset & RSET_FPR)) {
	  emit_loadk64(as, dest, ir);
	  return;
	}
#if LJ_64
      } else if (ir->o == IR_KINT64) {
	emit_loadk64(as, dest, ir);
	return;
#if LJ_GC64
      } else if (ir->o == IR_KGC || ir->o == IR_KPTR || ir->o == IR_KKPTR) {
	emit_loadk64(as, dest, ir);
	return;
#endif
#endif
      } else if (ir->o != IR_KPRI) {
	lj_assertA(ir->o == IR_KINT || ir->o == IR_KGC ||
		   ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL,
		   "K%03d has bad IR op %d", REF_BIAS - lref, ir->o);
	emit_loadi(as, dest, ir->i);
	return;
      }
    }
    if (!ra_hashint(left) && !iscrossref(as, lref))
      ra_sethint(ir->r, dest);  /* Propagate register hint. */
    left = ra_allocref(as, lref, dest < RID_MAX_GPR ? RSET_GPR : RSET_FPR);
  }
  ra_noweak(as, left);
  /* Move needed for true 3-operand instruction: y=a+b ==> y=a; y+=b. */
  if (dest != left) {
    /* Use register renaming if dest is the PHI reg. */
    if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
      ra_modified(as, left);
      ra_rename(as, left, dest);
    } else {
      emit_movrr(as, ir, dest, left);
    }
  }
}
#else
/* Similar to ra_left, except we override any hints. */
static void ra_leftov(ASMState *as, Reg dest, IRRef lref)
{
  IRIns *ir = IR(lref);
  Reg left = ir->r;
  if (ra_noreg(left)) {
    ra_sethint(ir->r, dest);  /* Propagate register hint. */
    left = ra_allocref(as, lref,
		       (LJ_SOFTFP || dest < RID_MAX_GPR) ? RSET_GPR : RSET_FPR);
  }
  ra_noweak(as, left);
  if (dest != left) {
    /* Use register renaming if dest is the PHI reg. */
    if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
      ra_modified(as, left);
      ra_rename(as, left, dest);
    } else {
      emit_movrr(as, ir, dest, left);
    }
  }
}
#endif

/* Force a RID_RETLO/RID_RETHI destination register pair (marked as free). */
static void ra_destpair(ASMState *as, IRIns *ir)
{
  Reg destlo = ir->r, desthi = (ir+1)->r;
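  /* Use the ins of the pair whose type carries the 64 bit width, so the
  ** moves below are emitted with the right register width.
  */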
  IRIns *irx = (LJ_64 && !irt_is64(ir->t)) ? ir+1 : ir;
  /* First spill unrelated refs blocking the destination registers. */
  if (!rset_test(as->freeset, RID_RETLO) &&
      destlo != RID_RETLO && desthi != RID_RETLO)
    ra_restore(as, regcost_ref(as->cost[RID_RETLO]));
  if (!rset_test(as->freeset, RID_RETHI) &&
      destlo != RID_RETHI && desthi != RID_RETHI)
    ra_restore(as, regcost_ref(as->cost[RID_RETHI]));
  /* Next free the destination registers (if any). */
  if (ra_hasreg(destlo)) {
    ra_free(as, destlo);
    ra_modified(as, destlo);
  } else {
    destlo = RID_RETLO;
  }
  if (ra_hasreg(desthi)) {
    ra_free(as, desthi);
    ra_modified(as, desthi);
  } else {
    desthi = RID_RETHI;
  }
  /* Check for conflicts and shuffle the registers as needed. */
  if (destlo == RID_RETHI) {
    if (desthi == RID_RETLO) {
#if LJ_TARGET_X86ORX64
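      /* xchg RETLO, RETHI: opcode 0x90+r; 0x48 is the REX.W prefix for the
      ** 64 bit form.
      */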
      *--as->mcp = XI_XCHGa + RID_RETHI;
      if (LJ_64 && irt_is64(irx->t)) *--as->mcp = 0x48;
#else
      emit_movrr(as, irx, RID_RETHI, RID_TMP);
      emit_movrr(as, irx, RID_RETLO, RID_RETHI);
      emit_movrr(as, irx, RID_TMP, RID_RETLO);
#endif
    } else {
      emit_movrr(as, irx, RID_RETHI, RID_RETLO);
      if (desthi != RID_RETHI) emit_movrr(as, irx, desthi, RID_RETHI);
    }
  } else if (desthi == RID_RETLO) {
    emit_movrr(as, irx, RID_RETLO, RID_RETHI);
    if (destlo != RID_RETLO) emit_movrr(as, irx, destlo, RID_RETLO);
  } else {
    if (desthi != RID_RETHI) emit_movrr(as, irx, desthi, RID_RETHI);
    if (destlo != RID_RETLO) emit_movrr(as, irx, destlo, RID_RETLO);
  }
  /* Restore spill slots (if any). */
  if (ra_hasspill((ir+1)->s)) ra_save(as, ir+1, RID_RETHI);
  if (ra_hasspill(ir->s)) ra_save(as, ir, RID_RETLO);
}

/* -- Snapshot handling ---------------------------------------------------- */

/* Can we rematerialize a KNUM instead of forcing a spill? */
static int asm_snap_canremat(ASMState *as)
{
  Reg r;
  for (r = RID_MIN_FPR; r < RID_MAX_FPR; r++)
    if (irref_isk(regcost_ref(as->cost[r])))
      return 1;
  return 0;
}

/* Check whether a sunk store corresponds to an allocation. */
static int asm_sunk_store(ASMState *as, IRIns *ira, IRIns *irs)
{
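  /* A slot of 255 means no (small) store-to-allocation distance is
  ** recorded, so fall back to checking the key/ref chain structurally.
  */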
  if (irs->s == 255) {
    if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
	irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
      IRIns *irk = IR(irs->op1);
      if (irk->o == IR_AREF || irk->o == IR_HREFK)
	irk = IR(irk->op1);
      return (IR(irk->op1) == ira);
    }
    return 0;
  } else {
    return (ira + irs->s == irs);  /* Quick check. */
  }
}

/* Allocate register or spill slot for a ref that escapes to a snapshot. */
static void asm_snap_alloc1(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  if (!irref_isk(ref) && ir->r != RID_SUNK) {
    bloomset(as->snapfilt1, ref);
    bloomset(as->snapfilt2, hashrot(ref, ref + HASH_BIAS));
    if (ra_used(ir)) return;
    if (ir->r == RID_SINK) {
      ir->r = RID_SUNK;
#if LJ_HASFFI
      if (ir->o == IR_CNEWI) {  /* Allocate CNEWI value. */
	asm_snap_alloc1(as, ir->op2);
	if (LJ_32 && (ir+1)->o == IR_HIOP)
	  asm_snap_alloc1(as, (ir+1)->op2);
      } else
#endif
      {  /* Allocate stored values for TNEW, TDUP and CNEW. */
	IRIns *irs;
	lj_assertA(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW,
		   "sink of IR %04d has bad op %d", ref - REF_BIAS, ir->o);
	for (irs = IR(as->snapref-1); irs > ir; irs--)
	  if (irs->r == RID_SINK && asm_sunk_store(as, ir, irs)) {
	    lj_assertA(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
		       irs->o == IR_FSTORE || irs->o == IR_XSTORE,
		       "sunk store IR %04d has bad op %d",
		       (int)(irs - as->ir) - REF_BIAS, irs->o);
	    asm_snap_alloc1(as, irs->op2);
	    if (LJ_32 && (irs+1)->o == IR_HIOP)
	      asm_snap_alloc1(as, (irs+1)->op2);
	  }
      }
    } else {
      RegSet allow;
      if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT) {
	IRIns *irc;
	for (irc = IR(as->curins); irc > ir; irc--)
	  if ((irc->op1 == ref || irc->op2 == ref) &&
	      !(irc->r == RID_SINK || irc->r == RID_SUNK))
	    goto nosink;  /* Don't sink conversion if result is used. */
	asm_snap_alloc1(as, ir->op1);
	return;
      }
    nosink:
      allow = (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR;
      if ((as->freeset & allow) ||
	  (allow == RSET_FPR && asm_snap_canremat(as))) {
	/* Get a weak register if we have a free one or can rematerialize. */
	Reg r = ra_allocref(as, ref, allow);  /* Allocate a register. */
	if (!irt_isphi(ir->t))
	  ra_weak(as, r);  /* But mark it as weakly referenced. */
	checkmclim(as);
	RA_DBGX((as, "snapreg $f $r", ref, ir->r));
      } else {
	ra_spill(as, ir);  /* Otherwise force a spill slot. */
	RA_DBGX((as, "snapspill $f $s", ref, ir->s));
      }
    }
  }
}

/* Allocate refs escaping to a snapshot. */
static void asm_snap_alloc(ASMState *as, int snapno)
{
  SnapShot *snap = &as->T->snap[snapno];
  SnapEntry *map = &as->T->snapmap[snap->mapofs];
  MSize n, nent = snap->nent;
  as->snapfilt1 = as->snapfilt2 = 0;
  for (n = 0; n < nent; n++) {
    SnapEntry sn = map[n];
    IRRef ref = snap_ref(sn);
    if (!irref_isk(ref)) {
      asm_snap_alloc1(as, ref);
      if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) {
	lj_assertA(irt_type(IR(ref+1)->t) == IRT_SOFTFP,
		   "snap %d[%d] points to bad SOFTFP IR %04d",
		   snapno, n, ref - REF_BIAS);
	asm_snap_alloc1(as, ref+1);
      }
    }
  }
}

/* All guards for a snapshot use the same exitno. This is currently the
** same as the snapshot number. Since the exact origin of the exit cannot
** be determined, all guards for the same snapshot must exit with the same
** RegSP mapping.
** A renamed ref which has been used in a prior guard for the same snapshot
** would cause an inconsistency. The easy way out is to force a spill slot.
*/
static int asm_snap_checkrename(ASMState *as, IRRef ren)
{
  if (bloomtest(as->snapfilt1, ren) &&
      bloomtest(as->snapfilt2, hashrot(ren, ren + HASH_BIAS))) {
    IRIns *ir = IR(ren);
    ra_spill(as, ir);  /* Register renamed, so force a spill slot. */
    RA_DBGX((as, "snaprensp $f $s", ren, ir->s));
    return 1;  /* Found. */
  }
  return 0;  /* Not found. */
}

/* Prepare snapshot for next guard or throwing instruction. */
static void asm_snap_prep(ASMState *as)
{
  if (as->snapalloc) {
    /* Alloc on first invocation for each snapshot. */
    as->snapalloc = 0;
    asm_snap_alloc(as, as->snapno);
    as->snaprename = as->T->nins;
  } else {
    /* Check any renames above the highwater mark. */
    for (; as->snaprename < as->T->nins; as->snaprename++) {
      IRIns *ir = &as->T->ir[as->snaprename];
      if (asm_snap_checkrename(as, ir->op1))
	ir->op2 = REF_BIAS-1;  /* Kill rename. */
    }
  }
}

/* Move to previous snapshot when we cross the current snapshot ref. */
static void asm_snap_prev(ASMState *as)
{
  if (as->curins < as->snapref) {
    uintptr_t ofs = (uintptr_t)(as->mctoporig - as->mcp);
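    /* Snapshot mcode offsets are stored in 16 bit fields, so they must fit. */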
    if (ofs >= 0x10000) lj_trace_err(as->J, LJ_TRERR_MCODEOV);
    do {
      if (as->snapno == 0) return;
      as->snapno--;
      as->snapref = as->T->snap[as->snapno].ref;
      as->T->snap[as->snapno].mcofs = (uint16_t)ofs;  /* Remember mcode ofs. */
    } while (as->curins < as->snapref);  /* May have no ins in between. */
    as->snapalloc = 1;
  }
}

/* Fixup snapshot mcode offsets. */
static void asm_snap_fixup_mcofs(ASMState *as)
{
  uint32_t sz = (uint32_t)(as->mctoporig - as->mcp);
  SnapShot *snap = as->T->snap;
  SnapNo i;
  for (i = as->T->nsnap-1; i > 0; i--) {
    /* Compute offset from mcode start and store in correct snapshot. */
    snap[i].mcofs = (uint16_t)(sz - snap[i-1].mcofs);
  }
  snap[0].mcofs = 0;
}

/* -- Miscellaneous helpers ----------------------------------------------- */

/* Calculate stack adjustment. */
static int32_t asm_stack_adjust(ASMState *as)
{
  if (as->evenspill <= SPS_FIXED)
    return 0;
  return sps_scale(sps_align(as->evenspill));
}

/* Must match with hash*() in lj_tab.c. */
static uint32_t ir_khash(ASMState *as, IRIns *ir)
{
  uint32_t lo, hi;
  UNUSED(as);
  if (irt_isstr(ir->t)) {
    return ir_kstr(ir)->sid;
  } else if (irt_isnum(ir->t)) {
    lo = ir_knum(ir)->u32.lo;
    hi = ir_knum(ir)->u32.hi << 1;
  } else if (irt_ispri(ir->t)) {
    lj_assertA(!irt_isnil(ir->t), "hash of nil key");
    return irt_type(ir->t)-IRT_FALSE;
  } else {
    lj_assertA(irt_isgcv(ir->t), "hash of bad IR type %d", irt_type(ir->t));
    lo = u32ptr(ir_kgc(ir));
#if LJ_GC64
    hi = (uint32_t)(u64ptr(ir_kgc(ir)) >> 32) | (irt_toitype(ir->t) << 15);
#else
    hi = lo + HASH_BIAS;
#endif
  }
  return hashrot(lo, hi);
}

/* -- Allocations --------------------------------------------------------- */

static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args);
static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci);

static void asm_snew(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_new];
  IRRef args[3];
  asm_snap_prep(as);
  args[0] = ASMREF_L;  /* lua_State *L */
  args[1] = ir->op1;   /* const char *str */
  args[2] = ir->op2;   /* size_t len */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCstr * */
  asm_gencall(as, ci, args);
}

static void asm_tnew(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_new1];
  IRRef args[2];
  asm_snap_prep(as);
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ASMREF_TMP1;  /* uint32_t ahsize */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCtab * */
  asm_gencall(as, ci, args);
  ra_allockreg(as, ir->op1 | (ir->op2 << 24), ra_releasetmp(as, ASMREF_TMP1));
}

static void asm_tdup(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_dup];
  IRRef args[2];
  asm_snap_prep(as);
  args[0] = ASMREF_L;  /* lua_State *L */
  args[1] = ir->op1;   /* const GCtab *kt */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCtab * */
  asm_gencall(as, ci, args);
}

static void asm_gc_check(ASMState *as);

/* Explicit GC step. */
static void asm_gcstep(ASMState *as, IRIns *ir)
{
  IRIns *ira;
  for (ira = IR(as->stopins+1); ira < ir; ira++)
    if ((ira->o == IR_TNEW || ira->o == IR_TDUP ||
	 (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI))) &&
	ra_used(ira))
      as->gcsteps++;
  if (as->gcsteps)
    asm_gc_check(as);
  as->gcsteps = 0x80000000;  /* Prevent implicit GC check further up. */
}

/* -- Buffer operations --------------------------------------------------- */

static void asm_tvptr(ASMState *as, Reg dest, IRRef ref, MSize mode);
#if LJ_HASBUFFER
static void asm_bufhdr_write(ASMState *as, Reg sb);
#endif

static void asm_bufhdr(ASMState *as, IRIns *ir)
{
  Reg sb = ra_dest(as, ir, RSET_GPR);
  switch (ir->op2) {
  case IRBUFHDR_RESET: {
    Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
    IRIns irbp;
    irbp.ot = IRT(0, IRT_PTR);  /* Buffer data pointer type. */
    emit_storeofs(as, &irbp, tmp, sb, offsetof(SBuf, w));
    emit_loadofs(as, &irbp, tmp, sb, offsetof(SBuf, b));
    break;
  }
  case IRBUFHDR_APPEND: {
    /* Rematerialize const buffer pointer instead of likely spill. */
    IRIns *irp = IR(ir->op1);
    if (!(ra_hasreg(irp->r) || irp == ir-1 ||
	  (irp == ir-2 && !ra_used(ir-1)))) {
      while (!(irp->o == IR_BUFHDR && irp->op2 == IRBUFHDR_RESET))
	irp = IR(irp->op1);
      if (irref_isk(irp->op1)) {
	ra_weak(as, ra_allocref(as, ir->op1, RSET_GPR));
	ir = irp;
      }
    }
    break;
  }
#if LJ_HASBUFFER
  case IRBUFHDR_WRITE:
    asm_bufhdr_write(as, sb);
    break;
#endif
  default: lj_assertA(0, "bad BUFHDR op2 %d", ir->op2); break;
  }
#if LJ_TARGET_X86ORX64
  ra_left(as, sb, ir->op1);
#else
  ra_leftov(as, sb, ir->op1);
#endif
}

static void asm_bufput(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_putstr];
  IRRef args[3];
  IRIns *irs;
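  /* kchar: single-char constant, or the sentinel -129 (outside int8_t range). */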
  int kchar = -129;
  args[0] = ir->op1;  /* SBuf * */
  args[1] = ir->op2;  /* GCstr * */
  irs = IR(ir->op2);
  lj_assertA(irt_isstr(irs->t),
	     "BUFPUT of non-string IR %04d", ir->op2 - REF_BIAS);
  if (irs->o == IR_KGC) {
    GCstr *s = ir_kstr(irs);
    if (s->len == 1) {  /* Optimize put of single-char string constant. */
      kchar = (int8_t)strdata(s)[0];  /* Signed! */
      args[1] = ASMREF_TMP1;  /* int, truncated to char */
      ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar];
    }
  } else if (mayfuse(as, ir->op2) && ra_noreg(irs->r)) {
    if (irs->o == IR_TOSTR) {  /* Fuse number to string conversions. */
      if (irs->op2 == IRTOSTR_NUM) {
	args[1] = ASMREF_TMP1;  /* TValue * */
	ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putnum];
      } else {
	lj_assertA(irt_isinteger(IR(irs->op1)->t),
		   "TOSTR of non-numeric IR %04d", irs->op1);
	args[1] = irs->op1;  /* int */
	if (irs->op2 == IRTOSTR_INT)
	  ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putint];
	else
	  ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar];
      }
    } else if (irs->o == IR_SNEW) {  /* Fuse string allocation. */
      args[1] = irs->op1;  /* const void * */
      args[2] = irs->op2;  /* MSize */
      ci = &lj_ir_callinfo[IRCALL_lj_buf_putmem];
    }
  }
  asm_setupresult(as, ir, ci);  /* SBuf * */
  asm_gencall(as, ci, args);
  if (args[1] == ASMREF_TMP1) {
    Reg tmp = ra_releasetmp(as, ASMREF_TMP1);
    if (kchar == -129)
      asm_tvptr(as, tmp, irs->op1, IRTMPREF_IN1);
    else
      ra_allockreg(as, kchar, tmp);
  }
}

static void asm_bufstr(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_tostr];
  IRRef args[1];
  args[0] = ir->op1;  /* SBuf *sb */
  as->gcsteps++;
  asm_setupresult(as, ir, ci);  /* GCstr * */
  asm_gencall(as, ci, args);
}

/* -- Type conversions ---------------------------------------------------- */

static void asm_tostr(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci;
  IRRef args[2];
  asm_snap_prep(as);
  args[0] = ASMREF_L;
  as->gcsteps++;
  if (ir->op2 == IRTOSTR_NUM) {
    args[1] = ASMREF_TMP1;  /* cTValue * */
    ci = &lj_ir_callinfo[IRCALL_lj_strfmt_num];
  } else {
    args[1] = ir->op1;  /* int32_t k */
    if (ir->op2 == IRTOSTR_INT)
      ci = &lj_ir_callinfo[IRCALL_lj_strfmt_int];
    else
      ci = &lj_ir_callinfo[IRCALL_lj_strfmt_char];
  }
  asm_setupresult(as, ir, ci);  /* GCstr * */
  asm_gencall(as, ci, args);
  if (ir->op2 == IRTOSTR_NUM)
    asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1, IRTMPREF_IN1);
}

#if LJ_32 && LJ_HASFFI && !LJ_SOFTFP && !LJ_TARGET_X86
static void asm_conv64(ASMState *as, IRIns *ir)
{
  IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
  IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
  IRCallID id;
  IRRef args[2];
  lj_assertA((ir-1)->o == IR_CONV && ir->o == IR_HIOP,
	     "not a CONV/HIOP pair at IR %04d", (int)(ir - as->ir) - REF_BIAS);
  args[LJ_BE] = (ir-1)->op1;
  args[LJ_LE] = ir->op1;
  if (st == IRT_NUM || st == IRT_FLOAT) {
    id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
    ir--;
  } else {
    id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
  }
  {
#if LJ_TARGET_ARM && !LJ_ABI_SOFTFP
    CCallInfo cim = lj_ir_callinfo[id], *ci = &cim;
    cim.flags |= CCI_VARARG;  /* These calls don't use the hard-float ABI! */
#else
    const CCallInfo *ci = &lj_ir_callinfo[id];
#endif
    asm_setupresult(as, ir, ci);
    asm_gencall(as, ci, args);
  }
}
#endif

/* -- Memory references --------------------------------------------------- */

static void asm_newref(ASMState *as, IRIns *ir)
{
  const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
  IRRef args[3];
  if (ir->r == RID_SINK)
    return;
  asm_snap_prep(as);
  args[0] = ASMREF_L;     /* lua_State *L */
  args[1] = ir->op1;      /* GCtab *t */
  args[2] = ASMREF_TMP1;  /* cTValue *key */
  asm_setupresult(as, ir, ci);  /* TValue * */
  asm_gencall(as, ci, args);
  asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2, IRTMPREF_IN1);
}

static void asm_tmpref(ASMState *as, IRIns *ir)
{
  Reg r = ra_dest(as, ir, RSET_GPR);
  asm_tvptr(as, r, ir->op1, ir->op2);
}

static void asm_lref(ASMState *as, IRIns *ir)
{
  Reg r = ra_dest(as, ir, RSET_GPR);
#if LJ_TARGET_X86ORX64
  ra_left(as, r, ASMREF_L);
#else
  ra_leftov(as, r, ASMREF_L);
#endif
}

/* -- Calls --------------------------------------------------------------- */

/* Collect arguments from CALL* and CARG instructions. */
static void asm_collectargs(ASMState *as, IRIns *ir,
			    const CCallInfo *ci, IRRef *args)
{
  uint32_t n = CCI_XNARGS(ci);
  /* Account for split args. */
  lj_assertA(n <= CCI_NARGS_MAX*2, "too many args %d to collect", n);
  if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; }
  while (n-- > 1) {
    ir = IR(ir->op1);
    lj_assertA(ir->o == IR_CARG, "malformed CALL arg tree");
    args[n] = ir->op2 == REF_NIL ? 0 : ir->op2;
  }
  args[0] = ir->op1 == REF_NIL ? 0 : ir->op1;
  lj_assertA(IR(ir->op1)->o != IR_CARG, "malformed CALL arg tree");
}

/* Reconstruct CCallInfo flags for CALLX*. */
static uint32_t asm_callx_flags(ASMState *as, IRIns *ir)
{
  uint32_t nargs = 0;
  if (ir->op1 != REF_NIL) {  /* Count number of arguments first. */
    IRIns *ira = IR(ir->op1);
    nargs++;
    while (ira->o == IR_CARG) { nargs++; ira = IR(ira->op1); }
  }
#if LJ_HASFFI
  if (IR(ir->op2)->o == IR_CARG) {  /* Copy calling convention info. */
    CTypeID id = (CTypeID)IR(IR(ir->op2)->op2)->i;
    CType *ct = ctype_get(ctype_ctsG(J2G(as->J)), id);
    nargs |= ((ct->info & CTF_VARARG) ? CCI_VARARG : 0);
#if LJ_TARGET_X86
    nargs |= (ctype_cconv(ct->info) << CCI_CC_SHIFT);
#endif
  }
#endif
  return (nargs | (ir->t.irt << CCI_OTSHIFT));
}

static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
{
  const CCallInfo *ci = &lj_ir_callinfo[id];
  IRRef args[2];
  args[0] = ir->op1;
  args[1] = ir->op2;
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

static void asm_call(ASMState *as, IRIns *ir)
{
  IRRef args[CCI_NARGS_MAX];
  const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
  asm_collectargs(as, ir, ci, args);
  asm_setupresult(as, ir, ci);
  asm_gencall(as, ci, args);
}

/* -- PHI and loop handling ----------------------------------------------- */

/* Break a PHI cycle by renaming to a free register (evict if needed). */
static void asm_phi_break(ASMState *as, RegSet blocked, RegSet blockedby,
			  RegSet allow)
{
  RegSet candidates = blocked & allow;
  if (candidates) {  /* If this register file has candidates. */
    /* Note: the set for ra_pick cannot be empty, since each register file
    ** has some registers never allocated to PHIs.
    */
    Reg down, up = ra_pick(as, ~blocked & allow);  /* Get a free register. */
    if (candidates & ~blockedby)  /* Optimize shifts, else it's a cycle. */
      candidates = candidates & ~blockedby;
    down = rset_picktop(candidates);  /* Pick candidate PHI register. */
    ra_rename(as, down, up);  /* And rename it to the free register. */
  }
}

/* PHI register shuffling.
**
** The allocator tries hard to preserve PHI register assignments across
** the loop body. Most of the time this loop does nothing, since there
** are no register mismatches.
**
** If a register mismatch is detected and ...
** - the register is currently free: rename it.
** - the register is blocked by an invariant: restore/remat and rename it.
** - Otherwise the register is used by another PHI, so mark it as blocked.
**
** The renames are order-sensitive, so just retry the loop if a register
** is marked as blocked, but has been freed in the meantime. A cycle is
** detected if all of the blocked registers are allocated. To break the
** cycle rename one of them to a free register and retry.
**
** Note that PHI spill slots are kept in sync and don't need to be shuffled.
*/
static void asm_phi_shuffle(ASMState *as)
{
  RegSet work;

  /* Find and resolve PHI register mismatches. */
  for (;;) {
    RegSet blocked = RSET_EMPTY;
    RegSet blockedby = RSET_EMPTY;
    RegSet phiset = as->phiset;
    while (phiset) {  /* Check all left PHI operand registers. */
      Reg r = rset_pickbot(phiset);
      IRIns *irl = IR(as->phireg[r]);
      Reg left = irl->r;
      if (r != left) {  /* Mismatch? */
	if (!rset_test(as->freeset, r)) {  /* PHI register blocked? */
	  IRRef ref = regcost_ref(as->cost[r]);
	  /* Blocked by other PHI (w/reg)? */
	  if (!ra_iskref(ref) && irt_ismarked(IR(ref)->t)) {
	    rset_set(blocked, r);
	    if (ra_hasreg(left))
	      rset_set(blockedby, left);
	    left = RID_NONE;
	  } else {  /* Otherwise grab register from invariant. */
	    ra_restore(as, ref);
	    checkmclim(as);
	  }
	}
	if (ra_hasreg(left)) {
	  ra_rename(as, left, r);
	  checkmclim(as);
	}
      }
      rset_clear(phiset, r);
    }
    if (!blocked) break;  /* Finished. */
    if (!(as->freeset & blocked)) {  /* Break cycles if none are free. */
      asm_phi_break(as, blocked, blockedby, RSET_GPR);
      if (!LJ_SOFTFP) asm_phi_break(as, blocked, blockedby, RSET_FPR);
      checkmclim(as);
    }  /* Else retry some more renames. */
  }

  /* Restore/remat invariants whose registers are modified inside the loop. */
#if !LJ_SOFTFP
  work = as->modset & ~(as->freeset | as->phiset) & RSET_FPR;
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }
#endif
  work = as->modset & ~(as->freeset | as->phiset);
  while (work) {
    Reg r = rset_pickbot(work);
    ra_restore(as, regcost_ref(as->cost[r]));
    rset_clear(work, r);
    checkmclim(as);
  }

  /* Allocate and save all unsaved PHI regs and clear marks. */
  work = as->phiset;
  while (work) {
    Reg r = rset_picktop(work);
    IRRef lref = as->phireg[r];
    IRIns *ir = IR(lref);
    if (ra_hasspill(ir->s)) {  /* Left PHI gained a spill slot? */
      irt_clearmark(ir->t);  /* Handled here, so clear marker now. */
      ra_alloc1(as, lref, RID2RSET(r));
      ra_save(as, ir, r);  /* Save to spill slot inside the loop. */
      checkmclim(as);
    }
    rset_clear(work, r);
  }
}

/* Copy unsynced left/right PHI spill slots. Rarely needed. */
static void asm_phi_copyspill(ASMState *as)
{
  int need = 0;
  IRIns *ir;
  for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--)
    if (ra_hasspill(ir->s) && ra_hasspill(IR(ir->op1)->s))
      need |= irt_isfp(ir->t) ? 2 : 1;  /* Unsynced spill slot? */
  if ((need & 1)) {  /* Copy integer spill slots. */
#if !LJ_TARGET_X86ORX64
    Reg r = RID_TMP;
#else
    Reg r = RID_RET;
    if ((as->freeset & RSET_GPR))
      r = rset_pickbot((as->freeset & RSET_GPR));
    else
      emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
#endif
    for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
      if (ra_hasspill(ir->s)) {
	IRIns *irl = IR(ir->op1);
	if (ra_hasspill(irl->s) && !irt_isfp(ir->t)) {
	  emit_spstore(as, irl, r, sps_scale(irl->s));
	  emit_spload(as, ir, r, sps_scale(ir->s));
	  checkmclim(as);
	}
      }
    }
#if LJ_TARGET_X86ORX64
    if (!rset_test(as->freeset, r))
      emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
#endif
  }
#if !LJ_SOFTFP
  if ((need & 2)) {  /* Copy FP spill slots. */
#if LJ_TARGET_X86
    Reg r = RID_XMM0;
#else
    Reg r = RID_FPRET;
#endif
    if ((as->freeset & RSET_FPR))
      r = rset_pickbot((as->freeset & RSET_FPR));
    if (!rset_test(as->freeset, r))
      emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
    for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
      if (ra_hasspill(ir->s)) {
	IRIns *irl = IR(ir->op1);
	if (ra_hasspill(irl->s) && irt_isfp(ir->t)) {
	  emit_spstore(as, irl, r, sps_scale(irl->s));
	  emit_spload(as, ir, r, sps_scale(ir->s));
	  checkmclim(as);
	}
      }
    }
    if (!rset_test(as->freeset, r))
      emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
  }
#endif
}

/* Emit renames for left PHIs which are only spilled outside the loop. */
static void asm_phi_fixup(ASMState *as)
{
  RegSet work = as->phiset;
  while (work) {
    Reg r = rset_picktop(work);
    IRRef lref = as->phireg[r];
    IRIns *ir = IR(lref);
    if (irt_ismarked(ir->t)) {
      irt_clearmark(ir->t);
      /* Left PHI gained a spill slot before the loop? */
      if (ra_hasspill(ir->s)) {
	ra_addrename(as, r, lref, as->loopsnapno);
      }
    }
    rset_clear(work, r);
  }
}

/* Setup right PHI reference. */
static void asm_phi(ASMState *as, IRIns *ir)
{
  RegSet allow = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) &
		 ~as->phiset;
  RegSet afree = (as->freeset & allow);
  IRIns *irl = IR(ir->op1);
  IRIns *irr = IR(ir->op2);
  if (ir->r == RID_SINK)  /* Sink PHI. */
    return;
  /* Spill slot shuffling is not implemented yet (but rarely needed). */
  if (ra_hasspill(irl->s) || ra_hasspill(irr->s))
    lj_trace_err(as->J, LJ_TRERR_NYIPHI);
  /* Leave at least one register free for non-PHIs (and PHI cycle breaking). */
  if ((afree & (afree-1))) {  /* Two or more free registers? */
    Reg r;
    if (ra_noreg(irr->r)) {  /* Get a register for the right PHI. */
      r = ra_allocref(as, ir->op2, allow);
    } else {  /* Duplicate right PHI, need a copy (rare). */
      r = ra_scratch(as, allow);
      emit_movrr(as, irr, r, irr->r);
    }
    ir->r = (uint8_t)r;
    rset_set(as->phiset, r);
    as->phireg[r] = (IRRef1)ir->op1;
    irt_setmark(irl->t);  /* Marks left PHIs _with_ register. */
    if (ra_noreg(irl->r))
      ra_sethint(irl->r, r);  /* Set register hint for left PHI. */
  } else {  /* Otherwise allocate a spill slot. */
    /* This is overly restrictive, but it triggers only on synthetic code. */
    if (ra_hasreg(irl->r) || ra_hasreg(irr->r))
      lj_trace_err(as->J, LJ_TRERR_NYIPHI);
    ra_spill(as, ir);
    irr->s = ir->s;  /* Set right PHI spill slot. Sync left slot later. */
  }
}

static void asm_loop_fixup(ASMState *as);

/* Middle part of a loop. */
static void asm_loop(ASMState *as)
{
  MCode *mcspill;
  /* LOOP is a guard, so the snapno is up to date. */
  as->loopsnapno = as->snapno;
  if (as->gcsteps)
    asm_gc_check(as);
  /* LOOP marks the transition from the variant to the invariant part. */
  as->flagmcp = as->invmcp = NULL;
  as->sectref = 0;
  if (!neverfuse(as)) as->fuseref = 0;
  asm_phi_shuffle(as);
  mcspill = as->mcp;
  asm_phi_copyspill(as);
  asm_loop_fixup(as);
  as->mcloop = as->mcp;
  RA_DBGX((as, "===== LOOP ====="));
  if (!as->realign) RA_DBG_FLUSH();
  if (as->mcp != mcspill)
    emit_jmp(as, mcspill);
}

/* -- Target-specific assembler ------------------------------------------- */

#if LJ_TARGET_X86ORX64
#include "lj_asm_x86.h"
#elif LJ_TARGET_ARM
#include "lj_asm_arm.h"
#elif LJ_TARGET_ARM64
#include "lj_asm_arm64.h"
#elif LJ_TARGET_PPC
#include "lj_asm_ppc.h"
#elif LJ_TARGET_MIPS
#include "lj_asm_mips.h"
#else
#error "Missing assembler for target CPU"
#endif

/* -- Common instruction helpers ------------------------------------------ */

#if !LJ_SOFTFP32
#if !LJ_TARGET_X86ORX64
#define asm_ldexp(as, ir)  asm_callid(as, ir, IRCALL_ldexp)
#endif

static void asm_pow(ASMState *as, IRIns *ir)
{
#if LJ_64 && LJ_HASFFI
  if (!irt_isnum(ir->t))
    asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_powi64 :
					  IRCALL_lj_carith_powu64);
  else
#endif
    asm_callid(as, ir, IRCALL_pow);
}

static void asm_div(ASMState *as, IRIns *ir)
{
#if LJ_64 && LJ_HASFFI
  if (!irt_isnum(ir->t))
    asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_divi64 :
					  IRCALL_lj_carith_divu64);
  else
#endif
    asm_fpdiv(as, ir);
}
#endif

static void asm_mod(ASMState *as, IRIns *ir)
{
#if LJ_64 && LJ_HASFFI
  if (!irt_isint(ir->t))
    asm_callid(as, ir, irt_isi64(ir->t) ? IRCALL_lj_carith_modi64 :
					  IRCALL_lj_carith_modu64);
  else
#endif
    asm_callid(as, ir, IRCALL_lj_vm_modi);
}

static void asm_fuseequal(ASMState *as, IRIns *ir)
{
  /* Fuse HREF + EQ/NE. */
  if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
    as->curins--;
    asm_href(as, ir-1, (IROp)ir->o);
  } else {
    asm_equal(as, ir);
  }
}

static void asm_alen(ASMState *as, IRIns *ir)
{
  asm_callid(as, ir, ir->op2 == REF_NIL ? IRCALL_lj_tab_len :
					  IRCALL_lj_tab_len_hint);
}

/* -- Instruction dispatch ------------------------------------------------ */

/* Assemble a single instruction. */
static void asm_ir(ASMState *as, IRIns *ir)
{
  switch ((IROp)ir->o) {
  /* Miscellaneous ops. */
  case IR_LOOP: asm_loop(as); break;
  case IR_NOP: case IR_XBAR:
    lj_assertA(!ra_used(ir),
	       "IR %04d not unused", (int)(ir - as->ir) - REF_BIAS);
    break;
  case IR_USE:
    ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
  case IR_PHI: asm_phi(as, ir); break;
  case IR_HIOP: asm_hiop(as, ir); break;
  case IR_GCSTEP: asm_gcstep(as, ir); break;
  case IR_PROF: asm_prof(as, ir); break;

  /* Guarded assertions. */
  case IR_LT: case IR_GE: case IR_LE: case IR_GT:
  case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
  case IR_ABC:
    asm_comp(as, ir);
    break;
  case IR_EQ: case IR_NE: asm_fuseequal(as, ir); break;

  case IR_RETF: asm_retf(as, ir); break;

  /* Bit ops. */
  case IR_BNOT: asm_bnot(as, ir); break;
  case IR_BSWAP: asm_bswap(as, ir); break;
  case IR_BAND: asm_band(as, ir); break;
  case IR_BOR: asm_bor(as, ir); break;
  case IR_BXOR: asm_bxor(as, ir); break;
  case IR_BSHL: asm_bshl(as, ir); break;
  case IR_BSHR: asm_bshr(as, ir); break;
  case IR_BSAR: asm_bsar(as, ir); break;
  case IR_BROL: asm_brol(as, ir); break;
  case IR_BROR: asm_bror(as, ir); break;

  /* Arithmetic ops. */
  case IR_ADD: asm_add(as, ir); break;
  case IR_SUB: asm_sub(as, ir); break;
  case IR_MUL: asm_mul(as, ir); break;
  case IR_MOD: asm_mod(as, ir); break;
  case IR_NEG: asm_neg(as, ir); break;
#if LJ_SOFTFP32
  case IR_DIV: case IR_POW: case IR_ABS:
  case IR_LDEXP: case IR_FPMATH: case IR_TOBIT:
    /* Unused for LJ_SOFTFP32. */
    lj_assertA(0, "IR %04d with unused op %d",
	       (int)(ir - as->ir) - REF_BIAS, ir->o);
    break;
#else
  case IR_DIV: asm_div(as, ir); break;
  case IR_POW: asm_pow(as, ir); break;
  case IR_ABS: asm_abs(as, ir); break;
  case IR_LDEXP: asm_ldexp(as, ir); break;
  case IR_FPMATH: asm_fpmath(as, ir); break;
  case IR_TOBIT: asm_tobit(as, ir); break;
#endif
  case IR_MIN: asm_min(as, ir); break;
  case IR_MAX: asm_max(as, ir); break;

  /* Overflow-checking arithmetic ops. */
  case IR_ADDOV: asm_addov(as, ir); break;
  case IR_SUBOV: asm_subov(as, ir); break;
  case IR_MULOV: asm_mulov(as, ir); break;

  /* Memory references. */
  case IR_AREF: asm_aref(as, ir); break;
  case IR_HREF: asm_href(as, ir, 0); break;
  case IR_HREFK: asm_hrefk(as, ir); break;
  case IR_NEWREF: asm_newref(as, ir); break;
  case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
  case IR_FREF: asm_fref(as, ir); break;
  case IR_TMPREF: asm_tmpref(as, ir); break;
  case IR_STRREF: asm_strref(as, ir); break;
  case IR_LREF: asm_lref(as, ir); break;

  /* Loads and stores. */
  case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
    asm_ahuvload(as, ir);
    break;
  case IR_FLOAD: asm_fload(as, ir); break;
  case IR_XLOAD: asm_xload(as, ir); break;
  case IR_SLOAD: asm_sload(as, ir); break;
  case IR_ALEN: asm_alen(as, ir); break;

  case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
  case IR_FSTORE: asm_fstore(as, ir); break;
  case IR_XSTORE: asm_xstore(as, ir); break;

  /* Allocations. */
  case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
  case IR_TNEW: asm_tnew(as, ir); break;
  case IR_TDUP: asm_tdup(as, ir); break;
  case IR_CNEW: case IR_CNEWI:
#if LJ_HASFFI
    asm_cnew(as, ir);
#else
    lj_assertA(0, "IR %04d with unused op %d",
	       (int)(ir - as->ir) - REF_BIAS, ir->o);
#endif
    break;

  /* Buffer operations. */
  case IR_BUFHDR: asm_bufhdr(as, ir); break;
  case IR_BUFPUT: asm_bufput(as, ir); break;
  case IR_BUFSTR: asm_bufstr(as, ir); break;

  /* Write barriers. */
  case IR_TBAR: asm_tbar(as, ir); break;
  case IR_OBAR: asm_obar(as, ir); break;

  /* Type conversions. */
  case IR_CONV: asm_conv(as, ir); break;
  case IR_TOSTR: asm_tostr(as, ir); break;
  case IR_STRTO: asm_strto(as, ir); break;

  /* Calls. */
  case IR_CALLA:
    as->gcsteps++;
    /* fallthrough */
  case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
  case IR_CALLXS: asm_callx(as, ir); break;
1854 case IR_CARG: break;
1855
1856 default:
1857 setintV(&as->J->errinfo, ir->o);
1858 lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
1859 break;
1860 }
1861 }
1862
1863 /* -- Head of trace ------------------------------------------------------- */
1864
1865 /* Head of a root trace. */
1866 static void asm_head_root(ASMState *as)
1867 {
1868 int32_t spadj;
1869 asm_head_root_base(as);
1870 emit_setvmstate(as, (int32_t)as->T->traceno);
1871 spadj = asm_stack_adjust(as);
1872 as->T->spadjust = (uint16_t)spadj;
1873 emit_spsub(as, spadj);
1874 /* Root traces assume a checked stack for the starting proto. */
1875 as->T->topslot = gcref(as->T->startpt)->pt.framesize;
1876 }
1877
1878 /* Head of a side trace.
1879 **
1880 ** The current simplistic algorithm requires that all slots inherited
1881 ** from the parent are live in a register between pass 2 and pass 3. This
1882 ** avoids the complexity of stack slot shuffling. But of course this may
1883 ** overflow the register set in some cases and cause the dreaded error:
1884 ** "NYI: register coalescing too complex". A refined algorithm is needed.
1885 */
1886 static void asm_head_side(ASMState *as)
1887 {
1888 IRRef1 sloadins[RID_MAX];
1889 RegSet allow = RSET_ALL; /* Inverse of all coalesced registers. */
1890 RegSet live = RSET_EMPTY; /* Live parent registers. */
1891 RegSet pallow = RSET_GPR; /* Registers needed by the parent stack check. */
1892 Reg pbase;
1893 IRIns *irp = &as->parent->ir[REF_BASE]; /* Parent base. */
1894 int32_t spadj, spdelta;
1895 int pass2 = 0;
1896 int pass3 = 0;
1897 IRRef i;
1898
1899 if (as->snapno && as->topslot > as->parent->topslot) {
1900 /* Force snap #0 alloc to prevent register overwrite in stack check. */
1901 asm_snap_alloc(as, 0);
1902 }
1903 pbase = asm_head_side_base(as, irp);
1904 if (pbase != RID_NONE) {
1905 rset_clear(allow, pbase);
1906 rset_clear(pallow, pbase);
1907 }
1908
1909 /* Scan all parent SLOADs and collect register dependencies. */
1910 for (i = as->stopins; i > REF_BASE; i--) {
1911 IRIns *ir = IR(i);
1912 RegSP rs;
1913 lj_assertA((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) ||
1914 (LJ_SOFTFP && ir->o == IR_HIOP) || ir->o == IR_PVAL,
1915 "IR %04d has bad parent op %d",
1916 (int)(ir - as->ir) - REF_BIAS, ir->o);
1917 rs = as->parentmap[i - REF_FIRST];
1918 if (ra_hasreg(ir->r)) {
1919 rset_clear(allow, ir->r);
1920 if (ra_hasspill(ir->s)) {
1921 ra_save(as, ir, ir->r);
1922 checkmclim(as);
1923 }
1924 } else if (ra_hasspill(ir->s)) {
1925 irt_setmark(ir->t);
1926 pass2 = 1;
1927 }
1928 if (ir->r == rs) { /* Coalesce matching registers right now. */
1929 ra_free(as, ir->r);
1930 } else if (ra_hasspill(regsp_spill(rs))) {
1931 if (ra_hasreg(ir->r))
1932 pass3 = 1;
1933 } else if (ra_used(ir)) {
1934 sloadins[rs] = (IRRef1)i;
1935 rset_set(live, rs); /* Block live parent register. */
1936 }
1937 if (!ra_hasspill(regsp_spill(rs))) rset_clear(pallow, regsp_reg(rs));
1938 }
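/*
** After this scan, 'live' holds the parent registers whose contents
** are still wanted by this trace and 'allow' excludes every register
** already claimed; the reload and shuffle passes below work from
** these sets.
*/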
1939
1940 /* Calculate stack frame adjustment. */
1941 spadj = asm_stack_adjust(as);
1942 spdelta = spadj - (int32_t)as->parent->spadjust;
1943 if (spdelta < 0) { /* Don't shrink the stack frame. */
1944 spadj = (int32_t)as->parent->spadjust;
1945 spdelta = 0;
1946 }
1947 as->T->spadjust = (uint16_t)spadj;
1948
1949 /* Reload spilled target registers. */
1950 if (pass2) {
1951 for (i = as->stopins; i > REF_BASE; i--) {
1952 IRIns *ir = IR(i);
1953 if (irt_ismarked(ir->t)) {
1954 RegSet mask;
1955 Reg r;
1956 RegSP rs;
1957 irt_clearmark(ir->t);
1958 rs = as->parentmap[i - REF_FIRST];
1959 if (!ra_hasspill(regsp_spill(rs)))
1960 ra_sethint(ir->r, rs); /* Hint may be gone, set it again. */
1961 else if (sps_scale(regsp_spill(rs))+spdelta == sps_scale(ir->s))
1962 continue; /* Same spill slot, do nothing. */
1963 mask = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) & allow;
1964 if (mask == RSET_EMPTY)
1965 lj_trace_err(as->J, LJ_TRERR_NYICOAL);
1966 r = ra_allocref(as, i, mask);
1967 ra_save(as, ir, r);
1968 rset_clear(allow, r);
1969 if (r == rs) { /* Coalesce matching registers right now. */
1970 ra_free(as, r);
1971 rset_clear(live, r);
1972 } else if (ra_hasspill(regsp_spill(rs))) {
1973 pass3 = 1;
1974 }
1975 checkmclim(as);
1976 }
1977 }
1978 }
1979
1980 /* Store trace number and adjust stack frame relative to the parent. */
1981 emit_setvmstate(as, (int32_t)as->T->traceno);
1982 emit_spsub(as, spdelta);
1983
1984 #if !LJ_TARGET_X86ORX64
1985 /* Restore BASE register from parent spill slot. */
1986 if (ra_hasspill(irp->s))
1987 emit_spload(as, IR(REF_BASE), IR(REF_BASE)->r, sps_scale(irp->s));
1988 #endif
1989
1990 /* Restore target registers from parent spill slots. */
1991 if (pass3) {
1992 RegSet work = ~as->freeset & RSET_ALL;
1993 while (work) {
1994 Reg r = rset_pickbot(work);
1995 IRRef ref = regcost_ref(as->cost[r]);
1996 RegSP rs = as->parentmap[ref - REF_FIRST];
1997 rset_clear(work, r);
1998 if (ra_hasspill(regsp_spill(rs))) {
1999 int32_t ofs = sps_scale(regsp_spill(rs));
2000 ra_free(as, r);
2001 emit_spload(as, IR(ref), r, ofs);
2002 checkmclim(as);
2003 }
2004 }
2005 }
2006
2007 /* Shuffle registers to match up target regs with parent regs. */
2008 for (;;) {
2009 RegSet work;
2010
2011 /* Repeatedly coalesce free live registers by moving to their target. */
2012 while ((work = as->freeset & live) != RSET_EMPTY) {
2013 Reg rp = rset_pickbot(work);
2014 IRIns *ir = IR(sloadins[rp]);
2015 rset_clear(live, rp);
2016 rset_clear(allow, rp);
2017 ra_free(as, ir->r);
2018 emit_movrr(as, ir, ir->r, rp);
2019 checkmclim(as);
2020 }
2021
2022 /* We're done if no live registers remain. */
2023 if (live == RSET_EMPTY)
2024 break;
2025
2026 /* Break cycles by renaming one target to a temp. register. */
2027 if (live & RSET_GPR) {
2028 RegSet tmpset = as->freeset & ~live & allow & RSET_GPR;
2029 if (tmpset == RSET_EMPTY)
2030 lj_trace_err(as->J, LJ_TRERR_NYICOAL);
2031 ra_rename(as, rset_pickbot(live & RSET_GPR), rset_pickbot(tmpset));
2032 }
2033 if (!LJ_SOFTFP && (live & RSET_FPR)) {
2034 RegSet tmpset = as->freeset & ~live & allow & RSET_FPR;
2035 if (tmpset == RSET_EMPTY)
2036 lj_trace_err(as->J, LJ_TRERR_NYICOAL);
2037 ra_rename(as, rset_pickbot(live & RSET_FPR), rset_pickbot(tmpset));
2038 }
2039 checkmclim(as);
2040 /* Continue with coalescing to fix up the broken cycle(s). */
2041 }
2042
2043 /* Inherit top stack slot already checked by parent trace. */
2044 as->T->topslot = as->parent->topslot;
2045 if (as->topslot > as->T->topslot) { /* Need to check for higher slot? */
2046 #ifdef EXITSTATE_CHECKEXIT
2047 /* Highest exit + 1 indicates stack check. */
2048 ExitNo exitno = as->T->nsnap;
2049 #else
2050 /* Reuse the parent exit in the context of the parent trace. */
2051 ExitNo exitno = as->J->exitno;
2052 #endif
2053 as->T->topslot = (uint8_t)as->topslot; /* Remember for child traces. */
2054 asm_stack_check(as, as->topslot, irp, pallow, exitno);
2055 }
2056 }
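/*
** Illustration of the cycle break (hypothetical registers): if the
** parent left one value in r1 that this trace wants in r2, and another
** in r2 wanted in r1, neither move can go first. ra_rename() retargets
** one of them to a free temporary register, turning the cycle into a
** chain the coalescing loop above drains move by move.
*/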
2057
2058 /* -- Tail of trace ------------------------------------------------------- */
2059
2060 /* Get base slot for a snapshot. */
2061 static BCReg asm_baseslot(ASMState *as, SnapShot *snap, int *gotframe)
2062 {
2063 SnapEntry *map = &as->T->snapmap[snap->mapofs];
2064 MSize n;
2065 for (n = snap->nent; n > 0; n--) {
2066 SnapEntry sn = map[n-1];
2067 if ((sn & SNAP_FRAME)) {
2068 *gotframe = 1;
2069 return snap_slot(sn) - LJ_FR2;
2070 }
2071 }
2072 return 0;
2073 }
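/* Snapshot entries are ordered by slot, so scanning from the top finds
** the highest frame link, i.e. the base of the innermost frame.
*/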
2074
2075 /* Link to another trace. */
2076 static void asm_tail_link(ASMState *as)
2077 {
2078 SnapNo snapno = as->T->nsnap-1; /* Last snapshot. */
2079 SnapShot *snap = &as->T->snap[snapno];
2080 int gotframe = 0;
2081 BCReg baseslot = asm_baseslot(as, snap, &gotframe);
2082
2083 as->topslot = snap->topslot;
2084 checkmclim(as);
2085 ra_allocref(as, REF_BASE, RID2RSET(RID_BASE));
2086
2087 if (as->T->link == 0) {
2088 /* Setup fixed registers for exit to interpreter. */
2089 const BCIns *pc = snap_pc(&as->T->snapmap[snap->mapofs + snap->nent]);
2090 int32_t mres;
2091 if (bc_op(*pc) == BC_JLOOP) { /* NYI: find a better way to do this. */
2092 BCIns *retpc = &traceref(as->J, bc_d(*pc))->startins;
2093 if (bc_isret(bc_op(*retpc)))
2094 pc = retpc;
2095 }
2096 #if LJ_GC64
2097 emit_loadu64(as, RID_LPC, u64ptr(pc));
2098 #else
2099 ra_allockreg(as, i32ptr(J2GG(as->J)->dispatch), RID_DISPATCH);
2100 ra_allockreg(as, i32ptr(pc), RID_LPC);
2101 #endif
2102 mres = (int32_t)(snap->nslots - baseslot - LJ_FR2);
2103 switch (bc_op(*pc)) {
2104 case BC_CALLM: case BC_CALLMT:
2105 mres -= (int32_t)(1 + LJ_FR2 + bc_a(*pc) + bc_c(*pc)); break;
2106 case BC_RETM: mres -= (int32_t)(bc_a(*pc) + bc_d(*pc)); break;
2107 case BC_TSETM: mres -= (int32_t)bc_a(*pc); break;
2108 default: if (bc_op(*pc) < BC_FUNCF) mres = 0; break;
2109 }
2110 ra_allockreg(as, mres, RID_RET); /* Return MULTRES or 0. */
2111 } else if (baseslot) {
2112 /* Save modified BASE for linking to trace with higher start frame. */
2113 emit_setgl(as, RID_BASE, jit_base);
2114 }
2115 emit_addptr(as, RID_BASE, 8*(int32_t)baseslot);
2116
2117 if (as->J->ktrace) { /* Patch ktrace slot with the final GCtrace pointer. */
2118 setgcref(IR(as->J->ktrace)[LJ_GC64].gcr, obj2gco(as->J->curfinal));
2119 IR(as->J->ktrace)->o = IR_KGC;
2120 }
2121
2122 /* Sync the interpreter state with the on-trace state. */
2123 asm_stack_restore(as, snap);
2124
2125 /* Root traces that add frames need to check the stack at the end. */
2126 if (!as->parent && gotframe)
2127 asm_stack_check(as, as->topslot, NULL, as->freeset & RSET_GPR, snapno);
2128 }
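/*
** Worked example for the MULTRES logic above (sketch, assuming
** LJ_FR2 == 0): exiting at a BC_CALLM with A = 1 and C = 2, with a
** snapshot holding 6 slots above the base, gives
** mres = 6 - (1 + 0 + 1 + 2) = 2 pending multiple results for the
** interpreter; opcodes below BC_FUNCF get mres = 0 instead.
*/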
2129
2130 /* -- Trace setup --------------------------------------------------------- */
2131
2132 /* Clear reg/sp for all instructions and add register hints. */
2133 static void asm_setup_regsp(ASMState *as)
2134 {
2135 GCtrace *T = as->T;
2136 int sink = T->sinktags;
2137 IRRef nins = T->nins;
2138 IRIns *ir, *lastir;
2139 int inloop;
2140 #if LJ_TARGET_ARM
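/* Each nibble of rload is a candidate result register for a typed
** load; lj_ror() below cycles the pattern so consecutive loads spread
** their hints across different GPRs.
*/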
2141 uint32_t rload = 0xa6402a64;
2142 #endif
2143
2144 ra_setup(as);
2145 #if LJ_TARGET_ARM64
2146 ra_setkref(as, RID_GL, (intptr_t)J2G(as->J));
2147 #endif
2148
2149 /* Clear reg/sp for constants. */
2150 for (ir = IR(T->nk), lastir = IR(REF_BASE); ir < lastir; ir++) {
2151 ir->prev = REGSP_INIT;
2152 if (irt_is64(ir->t) && ir->o != IR_KNULL) {
2153 #if LJ_GC64
2154 /* The false-positive of irt_is64() for ASMREF_L (REF_NIL) is OK here. */
2155 ir->i = 0; /* Will become non-zero only for RIP-relative addresses. */
2156 #else
2157 /* Make life easier for backends by putting address of constant in i. */
2158 ir->i = (int32_t)(intptr_t)(ir+1);
2159 #endif
2160 ir++;
2161 }
2162 }
2163
2164 /* REF_BASE is used for implicit references to the BASE register. */
2165 lastir->prev = REGSP_HINT(RID_BASE);
2166
2167 as->snaprename = nins;
2168 as->snapref = nins;
2169 as->snapno = T->nsnap;
2170 as->snapalloc = 0;
2171
2172 as->stopins = REF_BASE;
2173 as->orignins = nins;
2174 as->curins = nins;
2175
2176 /* Setup register hints for parent link instructions. */
2177 ir = IR(REF_FIRST);
2178 if (as->parent) {
2179 uint16_t *p;
2180 lastir = lj_snap_regspmap(as->J, as->parent, as->J->exitno, ir);
2181 if (lastir - ir > LJ_MAX_JSLOTS)
2182 lj_trace_err(as->J, LJ_TRERR_NYICOAL);
2183 as->stopins = (IRRef)((lastir-1) - as->ir);
2184 for (p = as->parentmap; ir < lastir; ir++) {
2185 RegSP rs = ir->prev;
2186 *p++ = (uint16_t)rs; /* Copy original parent RegSP to parentmap. */
2187 if (!ra_hasspill(regsp_spill(rs)))
2188 ir->prev = (uint16_t)REGSP_HINT(regsp_reg(rs));
2189 else
2190 ir->prev = REGSP_INIT;
2191 }
2192 }
2193
2194 inloop = 0;
2195 as->evenspill = SPS_FIRST;
2196 for (lastir = IR(nins); ir < lastir; ir++) {
2197 if (sink) {
2198 if (ir->r == RID_SINK)
2199 continue;
2200 if (ir->r == RID_SUNK) { /* Revert after ASM restart. */
2201 ir->r = RID_SINK;
2202 continue;
2203 }
2204 }
2205 switch (ir->o) {
2206 case IR_LOOP:
2207 inloop = 1;
2208 break;
2209 #if LJ_TARGET_ARM
2210 case IR_SLOAD:
2211 if (!((ir->op2 & IRSLOAD_TYPECHECK) || (ir+1)->o == IR_HIOP))
2212 break;
2213 /* fallthrough */
2214 case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
2215 if (!LJ_SOFTFP && irt_isnum(ir->t)) break;
2216 ir->prev = (uint16_t)REGSP_HINT((rload & 15));
2217 rload = lj_ror(rload, 4);
2218 continue;
2219 case IR_TMPREF:
2220 if ((ir->op2 & IRTMPREF_OUT2) && as->evenspill < 4)
2221 as->evenspill = 4; /* TMPREF OUT2 needs two TValues on the stack. */
2222 break;
2223 #endif
2224 case IR_CALLXS: {
2225 CCallInfo ci;
2226 ci.flags = asm_callx_flags(as, ir);
2227 ir->prev = asm_setup_call_slots(as, ir, &ci);
2228 if (inloop)
2229 as->modset |= RSET_SCRATCH;
2230 continue;
2231 }
2232 case IR_CALLL:
2233 /* lj_vm_next needs two TValues on the stack. */
2234 #if LJ_TARGET_X64 && LJ_ABI_WIN
2235 if (ir->op2 == IRCALL_lj_vm_next && as->evenspill < SPS_FIRST + 4)
2236 as->evenspill = SPS_FIRST + 4;
2237 #else
2238 if (SPS_FIRST < 4 && ir->op2 == IRCALL_lj_vm_next && as->evenspill < 4)
2239 as->evenspill = 4;
2240 #endif
2241 /* fallthrough */
2242 case IR_CALLN: case IR_CALLA: case IR_CALLS: {
2243 const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
2244 ir->prev = asm_setup_call_slots(as, ir, ci);
2245 if (inloop)
2246 as->modset |= (ci->flags & CCI_NOFPRCLOBBER) ?
2247 (RSET_SCRATCH & ~RSET_FPR) : RSET_SCRATCH;
2248 continue;
2249 }
2250 case IR_HIOP:
2251 switch ((ir-1)->o) {
2252 #if LJ_SOFTFP && LJ_TARGET_ARM
2253 case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
2254 if (ra_hashint((ir-1)->r)) {
2255 ir->prev = (ir-1)->prev + 1;
2256 continue;
2257 }
2258 break;
2259 #endif
2260 #if !LJ_SOFTFP && LJ_NEED_FP64 && LJ_32 && LJ_HASFFI
2261 case IR_CONV:
2262 if (irt_isfp((ir-1)->t)) {
2263 ir->prev = REGSP_HINT(RID_FPRET);
2264 continue;
2265 }
2266 #endif
2267 /* fallthrough */
2268 case IR_CALLN: case IR_CALLL: case IR_CALLS: case IR_CALLXS:
2269 #if LJ_SOFTFP
2270 case IR_MIN: case IR_MAX:
2271 #endif
2272 (ir-1)->prev = REGSP_HINT(RID_RETLO);
2273 ir->prev = REGSP_HINT(RID_RETHI);
2274 continue;
2275 default:
2276 break;
2277 }
2278 break;
2279 #if LJ_SOFTFP
2280 case IR_MIN: case IR_MAX:
2281 if ((ir+1)->o != IR_HIOP) break;
2282 #endif
2283 /* fallthrough */
2284 /* C calls evict all scratch regs and return results in RID_RET. */
2285 case IR_SNEW: case IR_XSNEW: case IR_NEWREF: case IR_BUFPUT:
2286 if (REGARG_NUMGPR < 3 && as->evenspill < 3)
2287 as->evenspill = 3; /* lj_str_new and lj_tab_newkey need 3 args. */
2288 #if LJ_TARGET_X86 && LJ_HASFFI
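/* The 'if (0)' wrapper below is deliberate: the switch jumps straight
** to 'case IR_CNEW:' inside it, so only CNEW gets the extra spill
** check, yet both paths fall through to the common RID_RET hinting.
*/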
2289 if (0) {
2290 case IR_CNEW:
2291 if (ir->op2 != REF_NIL && as->evenspill < 4)
2292 as->evenspill = 4; /* lj_cdata_newv needs 4 args. */
2293 }
2294 /* fallthrough */
2295 #else
2296 /* fallthrough */
2297 case IR_CNEW:
2298 #endif
2299 /* fallthrough */
2300 case IR_TNEW: case IR_TDUP: case IR_CNEWI: case IR_TOSTR:
2301 case IR_BUFSTR:
2302 ir->prev = REGSP_HINT(RID_RET);
2303 if (inloop)
2304 as->modset = RSET_SCRATCH;
2305 continue;
2306 case IR_STRTO: case IR_OBAR:
2307 if (inloop)
2308 as->modset = RSET_SCRATCH;
2309 break;
2310 #if !LJ_SOFTFP
2311 #if !LJ_TARGET_X86ORX64
2312 case IR_LDEXP:
2313 #endif
2314 #endif
2315 /* fallthrough */
2316 case IR_POW:
2317 if (!LJ_SOFTFP && irt_isnum(ir->t)) {
2318 if (inloop)
2319 as->modset |= RSET_SCRATCH;
2320 #if LJ_TARGET_X86
2321 if (irt_isnum(IR(ir->op2)->t)) {
2322 if (as->evenspill < 4) /* Leave room to call pow(). */
2323 as->evenspill = 4;
2324 }
2325 break;
2326 #else
2327 ir->prev = REGSP_HINT(RID_FPRET);
2328 continue;
2329 #endif
2330 }
2331 /* fallthrough */ /* for integer POW */
2332 case IR_DIV: case IR_MOD:
2333 if ((LJ_64 && LJ_SOFTFP) || !irt_isnum(ir->t)) {
2334 ir->prev = REGSP_HINT(RID_RET);
2335 if (inloop)
2336 as->modset |= (RSET_SCRATCH & RSET_GPR);
2337 continue;
2338 }
2339 break;
2340 #if LJ_64 && LJ_SOFTFP
2341 case IR_ADD: case IR_SUB: case IR_MUL:
2342 if (irt_isnum(ir->t)) {
2343 ir->prev = REGSP_HINT(RID_RET);
2344 if (inloop)
2345 as->modset |= (RSET_SCRATCH & RSET_GPR);
2346 continue;
2347 }
2348 break;
2349 #endif
2350 case IR_FPMATH:
2351 #if LJ_TARGET_X86ORX64
2352 if (ir->op2 <= IRFPM_TRUNC) {
2353 if (!(as->flags & JIT_F_SSE4_1)) {
2354 ir->prev = REGSP_HINT(RID_XMM0);
2355 if (inloop)
2356 as->modset |= RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
2357 continue;
2358 }
2359 break;
2360 }
2361 #endif
2362 if (inloop)
2363 as->modset |= RSET_SCRATCH;
2364 #if LJ_TARGET_X86
2365 break;
2366 #else
2367 ir->prev = REGSP_HINT(RID_FPRET);
2368 continue;
2369 #endif
2370 #if LJ_TARGET_X86ORX64
2371 /* Non-constant shift counts need to be in RID_ECX on x86/x64. */
2372 case IR_BSHL: case IR_BSHR: case IR_BSAR:
2373 if ((as->flags & JIT_F_BMI2)) /* Except if BMI2 is available. */
2374 break;
2375 /* fallthrough */
2376 case IR_BROL: case IR_BROR:
2377 if (!irref_isk(ir->op2) && !ra_hashint(IR(ir->op2)->r)) {
2378 IR(ir->op2)->r = REGSP_HINT(RID_ECX);
2379 if (inloop)
2380 rset_set(as->modset, RID_ECX);
2381 }
2382 break;
2383 #endif
2384 /* Do not propagate hints across type conversions or loads. */
2385 case IR_TOBIT:
2386 case IR_XLOAD:
2387 #if !LJ_TARGET_ARM
2388 case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
2389 #endif
2390 break;
2391 case IR_CONV:
2392 if (irt_isfp(ir->t) || (ir->op2 & IRCONV_SRCMASK) == IRT_NUM ||
2393 (ir->op2 & IRCONV_SRCMASK) == IRT_FLOAT)
2394 break;
2395 /* fallthrough */
2396 default:
2397 /* Propagate hints across likely 'op reg, imm' or 'op reg'. */
2398 if (irref_isk(ir->op2) && !irref_isk(ir->op1) &&
2399 ra_hashint(regsp_reg(IR(ir->op1)->prev))) {
2400 ir->prev = IR(ir->op1)->prev;
2401 continue;
2402 }
2403 break;
2404 }
2405 ir->prev = REGSP_INIT;
2406 }
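/* Keep future 64 bit spill pairs aligned: a leftover odd slot is
** remembered in oddspill and handed to a later 32 bit spill.
*/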
2407 if ((as->evenspill & 1))
2408 as->oddspill = as->evenspill++;
2409 else
2410 as->oddspill = 0;
2411 }
2412
2413 /* -- Assembler core ------------------------------------------------------ */
2414
2415 /* Assemble a trace. */
2416 void lj_asm_trace(jit_State *J, GCtrace *T)
2417 {
2418 ASMState as_;
2419 ASMState *as = &as_;
2420
2421 /* Remove nops/renames left over from ASM restart due to LJ_TRERR_MCODELM. */
2422 {
2423 IRRef nins = T->nins;
2424 IRIns *ir = &T->ir[nins-1];
2425 if (ir->o == IR_NOP || ir->o == IR_RENAME) {
2426 do { ir--; nins--; } while (ir->o == IR_NOP || ir->o == IR_RENAME);
2427 T->nins = nins;
2428 }
2429 }
2430
2431 /* Ensure an initialized instruction beyond the last one for HIOP checks. */
2432 /* This also allows one RENAME to be added without reallocating curfinal. */
2433 as->orignins = lj_ir_nextins(J);
2434 lj_ir_nop(&J->cur.ir[as->orignins]);
2435
2436 /* Setup initial state. Copy some fields to reduce indirections. */
2437 as->J = J;
2438 as->T = T;
2439 J->curfinal = lj_trace_alloc(J->L, T); /* This copies the IR, too. */
2440 as->flags = J->flags;
2441 as->loopref = J->loopref;
2442 as->realign = NULL;
2443 as->loopinv = 0;
2444 as->parent = J->parent ? traceref(J, J->parent) : NULL;
2445
2446 /* Reserve MCode memory. */
2447 as->mctop = as->mctoporig = lj_mcode_reserve(J, &as->mcbot);
2448 as->mcp = as->mctop;
2449 as->mclim = as->mcbot + MCLIM_REDZONE;
2450 asm_setup_target(as);
2451
2452 /*
2453 ** This is a loop, because the MCode may have to be (re-)assembled
2454 ** multiple times:
2455 **
2456 ** 1. as->realign is set (and the assembly aborted), if the arch-specific
2457 ** backend wants the MCode to be aligned differently.
2458 **
2459 ** This is currently only the case on x86/x64, where small loops get
2460 ** an aligned loop body plus a short branch. Not much effort is wasted,
2461 ** because the abort happens very quickly and only once.
2462 **
2463 ** 2. The IR is immovable, since the MCode embeds pointers to various
2464 ** constants inside the IR. But RENAMEs may need to be added to the IR
2465 ** during assembly, which might grow and reallocate the IR. We check
2466 ** at the end if the IR (in J->cur.ir) has actually grown, resize the
2467 ** copy (in J->curfinal.ir) and try again.
2468 **
2469 ** 95% of all traces have zero RENAMEs, 3% have one RENAME, 1.5% have
2470 ** two RENAMEs and only 0.5% have more than that. That's why we opt to
2471 ** always have one spare slot in the IR (see above), which means we
2472 ** have to redo the assembly for only ~2% of all traces.
2473 **
2474 ** Very, very rarely, this needs to be done repeatedly, since the
2475 ** location of constants inside the IR (actually, reachability from
2476 ** a global pointer) may affect register allocation and thus the
2477 ** number of RENAMEs.
2478 */
2479 for (;;) {
2480 as->mcp = as->mctop;
2481 #ifdef LUA_USE_ASSERT
2482 as->mcp_prev = as->mcp;
2483 #endif
2484 as->ir = J->curfinal->ir; /* Use the copied IR. */
2485 as->curins = J->cur.nins = as->orignins;
2486
2487 RA_DBG_START();
2488 RA_DBGX((as, "===== STOP ====="));
2489
2490 /* General trace setup. Emit tail of trace. */
2491 asm_tail_prep(as);
2492 as->mcloop = NULL;
2493 as->flagmcp = NULL;
2494 as->topslot = 0;
2495 as->gcsteps = 0;
2496 as->sectref = as->loopref;
2497 as->fuseref = (as->flags & JIT_F_OPT_FUSE) ? as->loopref : FUSE_DISABLED;
2498 asm_setup_regsp(as);
2499 if (!as->loopref)
2500 asm_tail_link(as);
2501
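/*
** Backwards order is what makes single-pass register allocation work:
** every use of a value is seen before its definition, so a register
** can be picked at the (execution-wise last) use and freed again at
** the definition, without a separate liveness analysis.
*/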
2502 /* Assemble a trace in linear backwards order. */
2503 for (as->curins--; as->curins > as->stopins; as->curins--) {
2504 IRIns *ir = IR(as->curins);
2505 /* 64 bit types handled by SPLIT for 32 bit archs. */
2506 lj_assertA(!(LJ_32 && irt_isint64(ir->t)),
2507 "IR %04d has unsplit 64 bit type",
2508 (int)(ir - as->ir) - REF_BIAS);
2509 asm_snap_prev(as);
2510 if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE))
2511 continue; /* Dead-code elimination can be soooo easy. */
2512 if (irt_isguard(ir->t))
2513 asm_snap_prep(as);
2514 RA_DBG_REF();
2515 checkmclim(as);
2516 asm_ir(as, ir);
2517 }
2518
2519 if (as->realign && J->curfinal->nins >= T->nins)
2520 continue; /* Retry in case only the MCode needs to be realigned. */
2521
2522 /* Emit head of trace. */
2523 RA_DBG_REF();
2524 checkmclim(as);
2525 if (as->gcsteps > 0) {
2526 as->curins = as->T->snap[0].ref;
2527 asm_snap_prep(as); /* The GC check is a guard. */
2528 asm_gc_check(as);
2529 as->curins = as->stopins;
2530 }
2531 ra_evictk(as);
2532 if (as->parent)
2533 asm_head_side(as);
2534 else
2535 asm_head_root(as);
2536 asm_phi_fixup(as);
2537
2538 if (J->curfinal->nins >= T->nins) { /* IR didn't grow? */
2539 lj_assertA(J->curfinal->nk == T->nk, "unexpected IR constant growth");
2540 memcpy(J->curfinal->ir + as->orignins, T->ir + as->orignins,
2541 (T->nins - as->orignins) * sizeof(IRIns)); /* Copy RENAMEs. */
2542 T->nins = J->curfinal->nins;
2543 /* Fill mcofs of any unprocessed snapshots. */
2544 as->curins = REF_FIRST;
2545 asm_snap_prev(as);
2546 break; /* Done. */
2547 }
2548
2549 /* Otherwise try again with a bigger IR. */
2550 lj_trace_free(J2G(J), J->curfinal);
2551 J->curfinal = NULL; /* In case lj_trace_alloc() OOMs. */
2552 J->curfinal = lj_trace_alloc(J->L, T);
2553 as->realign = NULL;
2554 }
2555
2556 RA_DBGX((as, "===== START ===="));
2557 RA_DBG_FLUSH();
2558 if (as->freeset != RSET_ALL)
2559 lj_trace_err(as->J, LJ_TRERR_BADRA); /* Ouch! Should never happen. */
2560
2561 /* Set trace entry point before fixing up tail to allow link to self. */
2562 T->mcode = as->mcp;
2563 T->mcloop = as->mcloop ? (MSize)((char *)as->mcloop - (char *)as->mcp) : 0;
2564 if (as->loopref)
2565 asm_loop_tail_fixup(as);
2566 else
2567 asm_tail_fixup(as, T->link); /* Note: this may change as->mctop! */
2568 T->szmcode = (MSize)((char *)as->mctop - (char *)as->mcp);
2569 asm_snap_fixup_mcofs(as);
2570 #if LJ_TARGET_MCODE_FIXUP
2571 asm_mcode_fixup(T->mcode, T->szmcode);
2572 #endif
2573 lj_mcode_sync(T->mcode, as->mctoporig);
2574 }
2575
2576 #undef IR
2577
2578 #endif