Mercurial
comparison third_party/luajit/src/lj_mcode.c @ 186:8cf4ec5e2191 hg-web
Fixed merge conflict.
| author | MrJuneJune <me@mrjunejune.com> |
|---|---|
| date | Fri, 23 Jan 2026 22:38:59 -0800 |
| parents | 94705b5986b3 |
| children |
comparison
equal
deleted
inserted
replaced
| 176:fed99fc04e12 | 186:8cf4ec5e2191 |
|---|---|
| 1 /* | |
| 2 ** Machine code management. | |
| 3 ** Copyright (C) 2005-2023 Mike Pall. See Copyright Notice in luajit.h | |
| 4 */ | |
| 5 | |
| 6 #define lj_mcode_c | |
| 7 #define LUA_CORE | |
| 8 | |
| 9 #include "lj_obj.h" | |
| 10 #if LJ_HASJIT | |
| 11 #include "lj_gc.h" | |
| 12 #include "lj_err.h" | |
| 13 #include "lj_jit.h" | |
| 14 #include "lj_mcode.h" | |
| 15 #include "lj_trace.h" | |
| 16 #include "lj_dispatch.h" | |
| 17 #include "lj_prng.h" | |
| 18 #endif | |
| 19 #if LJ_HASJIT || LJ_HASFFI | |
| 20 #include "lj_vm.h" | |
| 21 #endif | |
| 22 | |
| 23 /* -- OS-specific functions ----------------------------------------------- */ | |
| 24 | |
| 25 #if LJ_HASJIT || LJ_HASFFI | |
| 26 | |
| 27 /* Define this if you want to run LuaJIT with Valgrind. */ | |
| 28 #ifdef LUAJIT_USE_VALGRIND | |
| 29 #include <valgrind/valgrind.h> | |
| 30 #endif | |
| 31 | |
| 32 #if LJ_TARGET_IOS | |
| 33 void sys_icache_invalidate(void *start, size_t len); | |
| 34 #endif | |
| 35 | |
/* Synchronize data/instruction cache.
** Flushes the CPU instruction cache for the freshly written machine code in
** [start, end) so it is safe to execute.  A no-op on x86/x64, which keeps
** data and instruction caches coherent in hardware.
*/
void lj_mcode_sync(void *start, void *end)
{
#ifdef LUAJIT_USE_VALGRIND
  /* Tell Valgrind to discard any cached translations of this range. */
  VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start);
#endif
#if LJ_TARGET_X86ORX64
  UNUSED(start); UNUSED(end);  /* Coherent I/D caches: nothing to do. */
#elif LJ_TARGET_IOS
  sys_icache_invalidate(start, (char *)end-(char *)start);
#elif LJ_TARGET_PPC
  lj_vm_cachesync(start, end);  /* Hand-written sync in the VM assembler. */
#elif defined(__GNUC__) || defined(__clang__)
  __clear_cache(start, end);  /* Compiler builtin for the target arch. */
#else
#error "Missing builtin to flush instruction cache"
#endif
}
| 54 | |
| 55 #endif | |
| 56 | |
| 57 #if LJ_HASJIT | |
| 58 | |
| 59 #if LJ_TARGET_WINDOWS | |
| 60 | |
| 61 #define WIN32_LEAN_AND_MEAN | |
| 62 #include <windows.h> | |
| 63 | |
| 64 #define MCPROT_RW PAGE_READWRITE | |
| 65 #define MCPROT_RX PAGE_EXECUTE_READ | |
| 66 #define MCPROT_RWX PAGE_EXECUTE_READWRITE | |
| 67 | |
/* Allocate executable/writable pages at or near the given address hint.
** hint == 0 means "anywhere"; a failed hintless allocation is fatal for the
** current trace (lj_trace_err unwinds).  A failed hinted allocation just
** returns NULL so the caller can probe a different address.
*/
static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot)
{
  void *p = LJ_WIN_VALLOC((void *)hint, sz,
			  MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot);
  if (!p && !hint)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  return p;
}
| 76 | |
/* Release an entire MCode area back to the OS. */
static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(J); UNUSED(sz);  /* MEM_RELEASE requires a size of 0. */
  VirtualFree(p, 0, MEM_RELEASE);
}
| 82 | |
/* Change page protection of an MCode area.
** Returns 0 on success, non-zero on failure (inverted Win32 convention to
** match the POSIX mprotect-style callers).
*/
static int mcode_setprot(void *p, size_t sz, DWORD prot)
{
  DWORD oprot;  /* Previous protection; required out-param, otherwise unused. */
  return !LJ_WIN_VPROTECT(p, sz, prot, &oprot);
}
| 88 | |
| 89 #elif LJ_TARGET_POSIX | |
| 90 | |
| 91 #include <sys/mman.h> | |
| 92 | |
| 93 #ifndef MAP_ANONYMOUS | |
| 94 #define MAP_ANONYMOUS MAP_ANON | |
| 95 #endif | |
| 96 | |
| 97 #define MCPROT_RW (PROT_READ|PROT_WRITE) | |
| 98 #define MCPROT_RX (PROT_READ|PROT_EXEC) | |
| 99 #define MCPROT_RWX (PROT_READ|PROT_WRITE|PROT_EXEC) | |
| 100 #ifdef PROT_MPROTECT | |
| 101 #define MCPROT_CREATE (PROT_MPROTECT(MCPROT_RWX)) | |
| 102 #else | |
| 103 #define MCPROT_CREATE 0 | |
| 104 #endif | |
| 105 | |
| 106 static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot) | |
| 107 { | |
| 108 void *p = mmap((void *)hint, sz, prot|MCPROT_CREATE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); | |
| 109 if (p == MAP_FAILED) { | |
| 110 if (!hint) lj_trace_err(J, LJ_TRERR_MCODEAL); | |
| 111 p = NULL; | |
| 112 } | |
| 113 return p; | |
| 114 } | |
| 115 | |
/* Release an entire MCode area back to the OS. */
static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(J);
  munmap(p, sz);
}
| 121 | |
/* Change page protection of an MCode area.
** Returns 0 on success, non-zero on failure (mprotect convention).
*/
static int mcode_setprot(void *p, size_t sz, int prot)
{
  return mprotect(p, sz, prot);
}
| 126 | |
| 127 #else | |
| 128 | |
| 129 #error "Missing OS support for explicit placement of executable memory" | |
| 130 | |
| 131 #endif | |
| 132 | |
| 133 /* -- MCode area protection ----------------------------------------------- */ | |
| 134 | |
| 135 #if LUAJIT_SECURITY_MCODE == 0 | |
| 136 | |
| 137 /* Define this ONLY if page protection twiddling becomes a bottleneck. | |
| 138 ** | |
| 139 ** It's generally considered to be a potential security risk to have | |
| 140 ** pages with simultaneous write *and* execute access in a process. | |
| 141 ** | |
| 142 ** Do not even think about using this mode for server processes or | |
| 143 ** apps handling untrusted external data. | |
| 144 ** | |
| 145 ** The security risk is not in LuaJIT itself -- but if an adversary finds | |
| 146 ** any *other* flaw in your C application logic, then any RWX memory pages | |
| 147 ** simplify writing an exploit considerably. | |
| 148 */ | |
| 149 #define MCPROT_GEN MCPROT_RWX | |
| 150 #define MCPROT_RUN MCPROT_RWX | |
| 151 | |
/* No-op: with a single RWX protection there is never anything to switch. */
static void mcode_protect(jit_State *J, int prot)
{
  /* Referencing mcode_setprot suppresses the unused-function warning. */
  UNUSED(J); UNUSED(prot); UNUSED(mcode_setprot);
}
| 156 | |
| 157 #else | |
| 158 | |
| 159 /* This is the default behaviour and much safer: | |
| 160 ** | |
| 161 ** Most of the time the memory pages holding machine code are executable, | |
| 162 ** but NONE of them is writable. | |
| 163 ** | |
| 164 ** The current memory area is marked read-write (but NOT executable) only | |
| 165 ** during the short time window while the assembler generates machine code. | |
| 166 */ | |
| 167 #define MCPROT_GEN MCPROT_RW | |
| 168 #define MCPROT_RUN MCPROT_RX | |
| 169 | |
/* Protection twiddling failed. Probably due to kernel security.
** Gives the embedder's panic handler (if any) a last word, then terminates:
** there is no safe way to continue without switchable page protection.
*/
static LJ_NORET LJ_NOINLINE void mcode_protfail(jit_State *J)
{
  lua_CFunction panic = J2G(J)->panic;
  if (panic) {
    lua_State *L = J->L;
    /* Push the error message so the panic handler can inspect it. */
    setstrV(L, L->top++, lj_err_str(L, LJ_ERR_JITPROT));
    panic(L);
  }
  exit(EXIT_FAILURE);
}
| 181 | |
| 182 /* Change protection of MCode area. */ | |
| 183 static void mcode_protect(jit_State *J, int prot) | |
| 184 { | |
| 185 if (J->mcprot != prot) { | |
| 186 if (LJ_UNLIKELY(mcode_setprot(J->mcarea, J->szmcarea, prot))) | |
| 187 mcode_protfail(J); | |
| 188 J->mcprot = prot; | |
| 189 } | |
| 190 } | |
| 191 | |
| 192 #endif | |
| 193 | |
| 194 /* -- MCode area allocation ----------------------------------------------- */ | |
| 195 | |
| 196 #if LJ_64 | |
| 197 #define mcode_validptr(p) (p) | |
| 198 #else | |
| 199 #define mcode_validptr(p) ((p) && (uintptr_t)(p) < 0xffff0000) | |
| 200 #endif | |
| 201 | |
| 202 #ifdef LJ_TARGET_JUMPRANGE | |
| 203 | |
/* Get memory within relative jump distance of our code in 64 bit mode. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
  /* Target an address in the static assembler code (64K aligned).
  ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB.
  ** Use half the jump range so every address in the range can reach any other.
  */
#if LJ_TARGET_MIPS
  /* Use the middle of the 256MB-aligned region. */
  uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler &
		      ~(uintptr_t)0x0fffffffu) + 0x08000000u;
#else
  uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
#endif
  const uintptr_t range = (1u << (LJ_TARGET_JUMPRANGE-1)) - (1u << 21);
  /* First try a contiguous area below the last one. */
  uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0;
  int i;
  /* Limit probing iterations, depending on the available pool size. */
  for (i = 0; i < LJ_TARGET_JUMPRANGE; i++) {
    if (mcode_validptr(hint)) {
      void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN);
      /* Unsigned wraparound turns this into a two-sided distance check. */
      if (mcode_validptr(p) &&
	  ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range))
	return p;
      if (p) mcode_free(J, p, sz);  /* Free badly placed area. */
    }
    /* Next try probing 64K-aligned pseudo-random addresses. */
    do {
      hint = lj_prng_u64(&J2G(J)->prng) & ((1u<<LJ_TARGET_JUMPRANGE)-0x10000);
    } while (!(hint + sz < range+range));
    hint = target + hint - range;  /* Recenter the probe around the target. */
  }
  lj_trace_err(J, LJ_TRERR_MCODEAL);  /* Give up. OS probably ignores hints? */
  return NULL;
}
| 241 | |
| 242 #else | |
| 243 | |
/* All memory addresses are reachable by relative jumps. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
#if defined(__OpenBSD__) || defined(__NetBSD__) || LJ_TARGET_UWP
  /* Allow better executable memory allocation for OpenBSD W^X mode:
  ** allocate with the run protection first, then switch to the generate
  ** protection.  NOTE: this path may return NULL if the switch fails.
  */
  void *p = mcode_alloc_at(J, 0, sz, MCPROT_RUN);
  if (p && mcode_setprot(p, sz, MCPROT_GEN)) {
    mcode_free(J, p, sz);
    return NULL;
  }
  return p;
#else
  return mcode_alloc_at(J, 0, sz, MCPROT_GEN);
#endif
}
| 259 | |
| 260 #endif | |
| 261 | |
| 262 /* -- MCode area management ----------------------------------------------- */ | |
| 263 | |
| 264 /* Allocate a new MCode area. */ | |
| 265 static void mcode_allocarea(jit_State *J) | |
| 266 { | |
| 267 MCode *oldarea = J->mcarea; | |
| 268 size_t sz = (size_t)J->param[JIT_P_sizemcode] << 10; | |
| 269 sz = (sz + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1); | |
| 270 J->mcarea = (MCode *)mcode_alloc(J, sz); | |
| 271 J->szmcarea = sz; | |
| 272 J->mcprot = MCPROT_GEN; | |
| 273 J->mctop = (MCode *)((char *)J->mcarea + J->szmcarea); | |
| 274 J->mcbot = (MCode *)((char *)J->mcarea + sizeof(MCLink)); | |
| 275 ((MCLink *)J->mcarea)->next = oldarea; | |
| 276 ((MCLink *)J->mcarea)->size = sz; | |
| 277 J->szallmcarea += sz; | |
| 278 J->mcbot = (MCode *)lj_err_register_mcode(J->mcarea, sz, (uint8_t *)J->mcbot); | |
| 279 } | |
| 280 | |
| 281 /* Free all MCode areas. */ | |
| 282 void lj_mcode_free(jit_State *J) | |
| 283 { | |
| 284 MCode *mc = J->mcarea; | |
| 285 J->mcarea = NULL; | |
| 286 J->szallmcarea = 0; | |
| 287 while (mc) { | |
| 288 MCode *next = ((MCLink *)mc)->next; | |
| 289 size_t sz = ((MCLink *)mc)->size; | |
| 290 lj_err_deregister_mcode(mc, sz, (uint8_t *)mc + sizeof(MCLink)); | |
| 291 mcode_free(J, mc, sz); | |
| 292 mc = next; | |
| 293 } | |
| 294 } | |
| 295 | |
| 296 /* -- MCode transactions -------------------------------------------------- */ | |
| 297 | |
/* Reserve the remainder of the current MCode area.
** Returns the top of the area (code is emitted downwards) and stores the
** lower bound in *lim.  Makes the area writable; must be paired with
** lj_mcode_commit or lj_mcode_abort to restore the run protection.
*/
MCode *lj_mcode_reserve(jit_State *J, MCode **lim)
{
  if (!J->mcarea)
    mcode_allocarea(J);  /* Lazily allocate the first area. */
  else
    mcode_protect(J, MCPROT_GEN);
  *lim = J->mcbot;
  return J->mctop;
}
| 308 | |
/* Commit the top part of the current MCode area.
** top is the new lowest used address; everything above it up to the old
** mctop now holds finished machine code.  Restores execute protection.
*/
void lj_mcode_commit(jit_State *J, MCode *top)
{
  J->mctop = top;
  mcode_protect(J, MCPROT_RUN);
}
| 315 | |
/* Abort the reservation: discard any generated code and restore the
** execute protection.  Safe to call before the first area exists.
*/
void lj_mcode_abort(jit_State *J)
{
  if (J->mcarea)
    mcode_protect(J, MCPROT_RUN);
}
| 322 | |
/* Set/reset protection to allow patching of MCode areas.
** finish == 0: make the area containing ptr writable and return the area
** base (the caller passes that base back as ptr when finishing).
** finish != 0: restore the run (execute) protection and return NULL.
*/
MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
{
  if (finish) {
#if LUAJIT_SECURITY_MCODE
    if (J->mcarea == ptr)
      mcode_protect(J, MCPROT_RUN);  /* Current area: use protection cache. */
    else if (LJ_UNLIKELY(mcode_setprot(ptr, ((MCLink *)ptr)->size, MCPROT_RUN)))
      mcode_protfail(J);
#endif
    return NULL;
  } else {
    MCode *mc = J->mcarea;
    /* Try current area first to use the protection cache. */
    if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) {
#if LUAJIT_SECURITY_MCODE
      mcode_protect(J, MCPROT_GEN);
#endif
      return mc;
    }
    /* Otherwise search through the list of MCode areas. */
    for (;;) {
      mc = ((MCLink *)mc)->next;
      lj_assertJ(mc != NULL, "broken MCode area chain");
      if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
#if LUAJIT_SECURITY_MCODE
	/* Older areas bypass the cache; twiddle their protection directly. */
	if (LJ_UNLIKELY(mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN)))
	  mcode_protfail(J);
#endif
	return mc;
      }
    }
  }
}
| 357 | |
| 358 /* Limit of MCode reservation reached. */ | |
| 359 void lj_mcode_limiterr(jit_State *J, size_t need) | |
| 360 { | |
| 361 size_t sizemcode, maxmcode; | |
| 362 lj_mcode_abort(J); | |
| 363 sizemcode = (size_t)J->param[JIT_P_sizemcode] << 10; | |
| 364 sizemcode = (sizemcode + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1); | |
| 365 maxmcode = (size_t)J->param[JIT_P_maxmcode] << 10; | |
| 366 if ((size_t)need > sizemcode) | |
| 367 lj_trace_err(J, LJ_TRERR_MCODEOV); /* Too long for any area. */ | |
| 368 if (J->szallmcarea + sizemcode > maxmcode) | |
| 369 lj_trace_err(J, LJ_TRERR_MCODEAL); | |
| 370 mcode_allocarea(J); | |
| 371 lj_trace_err(J, LJ_TRERR_MCODELM); /* Retry with new area. */ | |
| 372 } | |
| 373 | |
| 374 #endif |